summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.dockerignore4
-rw-r--r--.github/ISSUE_TEMPLATE.md13
-rw-r--r--.papr.inventory24
-rwxr-xr-x.papr.sh32
-rw-r--r--.papr.yml42
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--.tito/releasers.conf6
-rw-r--r--.travis.yml1
-rw-r--r--BUILD.md47
-rw-r--r--CONTRIBUTING.md198
-rw-r--r--DEPLOYMENT_TYPES.md32
-rw-r--r--Dockerfile42
-rw-r--r--Dockerfile.rhel741
-rw-r--r--README.md10
-rw-r--r--README_ANSIBLE_CONTAINER.md15
-rw-r--r--README_CONTAINERIZED_INSTALLATION.md2
-rw-r--r--README_CONTAINER_IMAGE.md77
-rw-r--r--README_libvirt.md8
-rw-r--r--ansible.cfg13
-rwxr-xr-xbin/cluster1
-rw-r--r--callback_plugins/aa_version_requirement.py20
-rw-r--r--docs/best_practices_guide.adoc39
-rw-r--r--docs/proposals/role_decomposition.md353
-rw-r--r--docs/pull_requests.md95
-rw-r--r--docs/repo_structure.md67
-rw-r--r--examples/README.md93
-rw-r--r--examples/certificate-check-upload.yaml53
-rw-r--r--examples/certificate-check-volume.yaml60
-rw-r--r--examples/scheduled-certcheck-upload.yaml53
-rw-r--r--examples/scheduled-certcheck-volume.yaml58
-rw-r--r--filter_plugins/oo_filters.py138
-rw-r--r--filter_plugins/openshift_node.py1
-rw-r--r--filter_plugins/openshift_version.py2
-rwxr-xr-xhack/build-images.sh26
-rw-r--r--hack/hooks/README.md37
-rw-r--r--hack/hooks/verify_generated_modules/README.md19
-rwxr-xr-xhack/hooks/verify_generated_modules/pre-commit55
-rwxr-xr-xhack/push-release.sh56
-rw-r--r--images/installer/Dockerfile43
-rw-r--r--images/installer/Dockerfile.rhel746
-rw-r--r--images/installer/README_CONTAINER_IMAGE.md48
-rw-r--r--images/installer/root/exports/config.json.template234
-rw-r--r--images/installer/root/exports/manifest.json12
-rw-r--r--images/installer/root/exports/service.template6
-rw-r--r--images/installer/root/exports/tmpfiles.template2
-rwxr-xr-ximages/installer/root/usr/local/bin/entrypoint17
-rwxr-xr-ximages/installer/root/usr/local/bin/run46
-rwxr-xr-ximages/installer/root/usr/local/bin/run-system-container.sh4
-rwxr-xr-ximages/installer/root/usr/local/bin/usage33
-rwxr-xr-ximages/installer/root/usr/local/bin/usage.ocp33
-rwxr-xr-ximages/installer/root/usr/local/bin/user_setup17
-rw-r--r--inventory/byo/hosts.byo.glusterfs.external.example56
-rw-r--r--inventory/byo/hosts.byo.glusterfs.mixed.example59
-rw-r--r--inventory/byo/hosts.byo.glusterfs.native.example46
-rw-r--r--inventory/byo/hosts.byo.glusterfs.registry-only.example52
-rw-r--r--inventory/byo/hosts.byo.glusterfs.storage-and-registry.example63
-rw-r--r--inventory/byo/hosts.origin.example162
-rw-r--r--inventory/byo/hosts.ose.example163
-rw-r--r--library/kubeclient_ca.py2
-rwxr-xr-xlibrary/modify_yaml.py2
-rw-r--r--lookup_plugins/oo_option.py2
-rw-r--r--openshift-ansible.spec1170
-rw-r--r--playbooks/adhoc/contiv/delete_contiv.yml2
-rw-r--r--playbooks/adhoc/create_pv/create_pv.yaml2
-rw-r--r--playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml2
-rwxr-xr-xplaybooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml2
-rw-r--r--playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml2
-rw-r--r--playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py1
-rw-r--r--playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml2
-rw-r--r--playbooks/adhoc/uninstall.yml55
-rw-r--r--playbooks/aws/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/aws/openshift-cluster/config.yml1
-rw-r--r--playbooks/byo/openshift-cfme/config.yml8
-rw-r--r--playbooks/byo/openshift-cfme/uninstall.yml6
-rw-r--r--playbooks/byo/openshift-checks/README.md66
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/default.yaml (renamed from playbooks/certificate_expiry/default.yaml)0
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml40
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml (renamed from playbooks/certificate_expiry/easy-mode.yaml)0
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml (renamed from playbooks/certificate_expiry/html_and_json_default_paths.yaml)0
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml16
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml (renamed from playbooks/certificate_expiry/longer-warning-period-json-results.yaml)0
-rw-r--r--playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml (renamed from playbooks/certificate_expiry/longer_warning_period.yaml)0
l---------playbooks/byo/openshift-checks/certificate_expiry/roles1
-rw-r--r--playbooks/byo/openshift-checks/health.yml3
-rw-r--r--playbooks/byo/openshift-checks/pre-install.yml3
-rw-r--r--playbooks/byo/openshift-cluster/cluster_hosts.yml5
-rw-r--r--playbooks/byo/openshift-cluster/config.yml4
-rw-r--r--playbooks/byo/openshift-cluster/enable_dnsmasq.yml24
-rw-r--r--playbooks/byo/openshift-cluster/initialize_groups.yml10
-rw-r--r--playbooks/byo/openshift-cluster/openshift-logging.yml24
-rw-r--r--playbooks/byo/openshift-cluster/openshift-metrics.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-certificates.yml6
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml10
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-master-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-node-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml6
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-router-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/service-catalog.yml12
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/README.md5
l---------playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml35
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml24
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml104
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml101
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml102
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml108
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml103
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/README.md10
l---------playbooks/byo/openshift-cluster/upgrades/v3_6/roles1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml108
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml103
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml99
-rw-r--r--playbooks/byo/openshift-etcd/config.yml14
-rw-r--r--playbooks/byo/openshift-etcd/migrate.yml8
-rw-r--r--playbooks/byo/openshift-etcd/restart.yml4
-rw-r--r--playbooks/byo/openshift-glusterfs/README.md98
-rw-r--r--playbooks/byo/openshift-glusterfs/config.yml10
l---------playbooks/byo/openshift-glusterfs/filter_plugins1
l---------playbooks/byo/openshift-glusterfs/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-glusterfs/registry.yml10
l---------playbooks/byo/openshift-glusterfs/roles (renamed from playbooks/byo/openshift-preflight/roles)0
-rw-r--r--playbooks/byo/openshift-master/restart.yml4
-rw-r--r--playbooks/byo/openshift-master/scaleup.yml24
-rw-r--r--playbooks/byo/openshift-node/network_manager.yml42
-rw-r--r--playbooks/byo/openshift-node/restart.yml4
-rw-r--r--playbooks/byo/openshift-node/scaleup.yml24
-rw-r--r--playbooks/byo/openshift-preflight/README.md43
-rw-r--r--playbooks/byo/openshift-preflight/check.yml13
-rw-r--r--playbooks/byo/openshift_facts.yml5
-rw-r--r--playbooks/byo/rhel_subscribe.yml5
l---------playbooks/certificate_expiry1
l---------playbooks/certificate_expiry/roles1
-rw-r--r--playbooks/common/openshift-cfme/config.yml44
l---------playbooks/common/openshift-cfme/filter_plugins1
l---------playbooks/common/openshift-cfme/library1
l---------playbooks/common/openshift-cfme/roles1
-rw-r--r--playbooks/common/openshift-cfme/uninstall.yml8
-rw-r--r--playbooks/common/openshift-checks/health.yml16
-rw-r--r--playbooks/common/openshift-checks/pre-install.yml16
l---------playbooks/common/openshift-checks/roles1
-rw-r--r--playbooks/common/openshift-cluster/config.yml70
-rw-r--r--playbooks/common/openshift-cluster/disable_excluder.yml17
-rw-r--r--playbooks/common/openshift-cluster/evaluate_groups.yml109
-rw-r--r--playbooks/common/openshift-cluster/initialize_oo_option_facts.yml27
-rw-r--r--playbooks/common/openshift-cluster/initialize_openshift_version.yml5
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted.yml2
-rw-r--r--playbooks/common/openshift-cluster/openshift_logging.yml4
-rw-r--r--playbooks/common/openshift-cluster/openshift_metrics.yml11
-rw-r--r--playbooks/common/openshift-cluster/openshift_provisioners.yml5
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml158
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml8
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/masters.yml10
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/ca.yml)156
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/registry.yml3
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/router.yml62
-rw-r--r--playbooks/common/openshift-cluster/reset_excluder.yml8
-rw-r--r--playbooks/common/openshift-cluster/service_catalog.yml21
-rw-r--r--playbooks/common/openshift-cluster/std_include.yml24
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_excluder.yml21
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml (renamed from playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml)19
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh (renamed from playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/restart.yml5
l---------playbooks/common/openshift-cluster/upgrades/docker/roles (renamed from playbooks/byo/openshift-cluster/upgrades/docker/roles)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml8
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/backup.yml96
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml23
l---------playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/main.yml35
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml20
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml179
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml17
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml18
-rw-r--r--playbooks/common/openshift-cluster/upgrades/init.yml59
-rw-r--r--playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml45
-rwxr-xr-xplaybooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/post_control_plane.yml70
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml29
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml33
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml66
-rw-r--r--playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml56
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml67
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml2
l---------playbooks/common/openshift-cluster/upgrades/v3_3/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_3/roles)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml112
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml2
l---------playbooks/common/openshift-cluster/upgrades/v3_4/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_4/roles)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml115
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml110
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml18
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml121
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml110
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml18
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml121
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml110
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml4
-rw-r--r--playbooks/common/openshift-cluster/validate_hostnames.yml4
-rw-r--r--playbooks/common/openshift-etcd/config.yml1
-rw-r--r--playbooks/common/openshift-etcd/migrate.yml120
-rw-r--r--playbooks/common/openshift-etcd/restart.yml2
-rw-r--r--playbooks/common/openshift-glusterfs/config.yml40
l---------playbooks/common/openshift-glusterfs/filter_plugins1
l---------playbooks/common/openshift-glusterfs/lookup_plugins1
-rw-r--r--playbooks/common/openshift-glusterfs/registry.yml49
l---------playbooks/common/openshift-glusterfs/roles1
-rw-r--r--playbooks/common/openshift-loadbalancer/config.yml1
-rw-r--r--playbooks/common/openshift-master/config.yml26
-rw-r--r--playbooks/common/openshift-master/restart.yml6
-rw-r--r--playbooks/common/openshift-master/restart_hosts.yml1
-rw-r--r--playbooks/common/openshift-master/restart_services.yml1
-rw-r--r--playbooks/common/openshift-master/scaleup.yml18
-rw-r--r--playbooks/common/openshift-node/config.yml7
-rw-r--r--playbooks/common/openshift-node/network_manager.yml28
-rw-r--r--playbooks/common/openshift-node/restart.yml7
-rw-r--r--playbooks/common/openshift-node/scaleup.yml16
-rw-r--r--playbooks/gce/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/libvirt/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/libvirt/openshift-cluster/config.yml3
-rw-r--r--playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml12
-rw-r--r--playbooks/openstack/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/openstack/openshift-cluster/files/heat_stack.yaml10
-rw-r--r--requirements.txt12
-rw-r--r--roles/ansible_service_broker/defaults/main.yml10
-rw-r--r--roles/ansible_service_broker/meta/main.yml15
-rw-r--r--roles/ansible_service_broker/tasks/install.yml280
-rw-r--r--roles/ansible_service_broker/tasks/main.yml8
-rw-r--r--roles/ansible_service_broker/tasks/remove.yml65
-rw-r--r--roles/ansible_service_broker/tasks/validate_facts.yml15
-rw-r--r--roles/ansible_service_broker/vars/default_images.yml14
-rw-r--r--roles/ansible_service_broker/vars/openshift-enterprise.yml14
-rw-r--r--roles/calico/README.md9
-rw-r--r--roles/calico/defaults/main.yaml13
-rw-r--r--roles/calico/handlers/main.yml8
-rw-r--r--roles/calico/tasks/main.yml66
-rw-r--r--roles/calico/templates/10-calico.conf.j2 (renamed from roles/calico/templates/calico.conf.j2)2
-rw-r--r--roles/calico/templates/calico.service.j216
-rw-r--r--roles/calico/templates/calicoctl.cfg.j2 (renamed from roles/calico/templates/calico.cfg.j2)2
-rw-r--r--roles/calico_master/README.md12
-rw-r--r--roles/calico_master/defaults/main.yaml5
-rw-r--r--roles/calico_master/tasks/main.yml19
-rw-r--r--roles/calico_master/templates/calico-policy-controller.yml.j28
-rw-r--r--roles/cockpit-ui/tasks/main.yml13
-rw-r--r--roles/cockpit/tasks/main.yml2
-rw-r--r--roles/contiv/defaults/main.yml25
-rw-r--r--roles/contiv/meta/main.yml2
-rw-r--r--roles/contiv/tasks/default_network.yml45
-rw-r--r--roles/contiv/tasks/netmaster.yml2
-rw-r--r--roles/contiv/tasks/netmaster_iptables.yml2
-rw-r--r--roles/contiv/tasks/netplugin.yml6
-rw-r--r--roles/contiv/tasks/netplugin_iptables.yml35
-rw-r--r--roles/contiv/tasks/packageManagerInstall.yml5
-rw-r--r--roles/contiv/tasks/pkgMgrInstallers/centos-install.yml18
-rw-r--r--roles/contiv/templates/aci-gw.service2
-rw-r--r--roles/contiv/templates/netplugin.j24
-rw-r--r--roles/contiv_auth_proxy/README.md29
-rw-r--r--roles/contiv_auth_proxy/defaults/main.yml11
-rw-r--r--roles/contiv_auth_proxy/files/auth-proxy.service13
-rw-r--r--roles/contiv_auth_proxy/handlers/main.yml2
-rw-r--r--roles/contiv_auth_proxy/tasks/cleanup.yml10
-rw-r--r--roles/contiv_auth_proxy/tasks/main.yml37
-rw-r--r--roles/contiv_auth_proxy/templates/auth_proxy.j236
-rw-r--r--roles/contiv_auth_proxy/tests/inventory1
-rw-r--r--roles/contiv_auth_proxy/tests/test.yml5
-rw-r--r--roles/contiv_auth_proxy/vars/main.yml2
-rw-r--r--roles/contiv_facts/defaults/main.yaml3
-rw-r--r--roles/contiv_facts/tasks/main.yml6
-rw-r--r--roles/contiv_facts/tasks/rpm.yml4
-rw-r--r--roles/dns/templates/named.service.j28
-rw-r--r--roles/docker/README.md9
-rw-r--r--roles/docker/handlers/main.yml6
-rw-r--r--roles/docker/meta/main.yml1
-rw-r--r--roles/docker/tasks/main.yml122
-rw-r--r--roles/docker/tasks/package_docker.yml134
-rw-r--r--roles/docker/tasks/systemcontainer_docker.yml176
-rw-r--r--roles/docker/templates/daemon.json20
-rw-r--r--roles/docker/templates/systemcontainercustom.conf.j217
-rw-r--r--roles/docker/vars/main.yml4
-rw-r--r--roles/etcd/defaults/main.yaml7
-rw-r--r--roles/etcd/files/etcdctl.sh11
-rw-r--r--roles/etcd/meta/main.yml1
-rw-r--r--roles/etcd/tasks/main.yml120
-rw-r--r--roles/etcd/tasks/system_container.yml62
-rw-r--r--roles/etcd/templates/etcd.conf.j22
-rw-r--r--roles/etcd/templates/etcd.docker.service12
-rw-r--r--roles/etcd_client_certificates/tasks/main.yml4
-rw-r--r--roles/etcd_common/README.md37
-rw-r--r--roles/etcd_common/defaults/main.yml30
-rw-r--r--roles/etcd_common/tasks/backup.yml102
-rw-r--r--roles/etcd_common/tasks/drop_etcdctl.yml (renamed from roles/etcd/tasks/etcdctl.yml)6
-rw-r--r--roles/etcd_common/tasks/main.yml9
-rw-r--r--roles/etcd_common/templates/etcdctl.sh.j212
-rw-r--r--roles/etcd_common/vars/main.yml4
-rw-r--r--roles/etcd_migrate/README.md53
-rw-r--r--roles/etcd_migrate/defaults/main.yml3
-rw-r--r--roles/etcd_migrate/meta/main.yml17
-rw-r--r--roles/etcd_migrate/tasks/check.yml59
-rw-r--r--roles/etcd_migrate/tasks/check_cluster_health.yml23
-rw-r--r--roles/etcd_migrate/tasks/check_cluster_status.yml32
-rw-r--r--roles/etcd_migrate/tasks/configure.yml13
-rw-r--r--roles/etcd_migrate/tasks/main.yml25
-rw-r--r--roles/etcd_migrate/tasks/migrate.yml64
-rw-r--r--roles/etcd_server_certificates/meta/main.yml2
-rw-r--r--roles/etcd_server_certificates/tasks/main.yml41
-rw-r--r--roles/etcd_upgrade/defaults/main.yml3
-rw-r--r--roles/etcd_upgrade/meta/main.yml17
-rw-r--r--roles/etcd_upgrade/tasks/main.yml14
-rw-r--r--roles/etcd_upgrade/tasks/upgrade.yml11
-rw-r--r--roles/etcd_upgrade/tasks/upgrade_image.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml)16
-rw-r--r--roles/etcd_upgrade/tasks/upgrade_rpm.yml32
-rw-r--r--roles/etcd_upgrade/vars/main.yml3
-rw-r--r--roles/flannel/handlers/main.yml8
-rw-r--r--roles/lib_openshift/library/oc_adm_ca_server_cert.py139
-rw-r--r--roles/lib_openshift/library/oc_adm_manage_node.py126
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_group.py124
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_user.py126
-rw-r--r--roles/lib_openshift/library/oc_adm_registry.py237
-rw-r--r--roles/lib_openshift/library/oc_adm_router.py196
-rw-r--r--roles/lib_openshift/library/oc_atomic_container.py21
-rw-r--r--roles/lib_openshift/library/oc_clusterrole.py138
-rw-r--r--roles/lib_openshift/library/oc_configmap.py128
-rw-r--r--roles/lib_openshift/library/oc_edit.py124
-rw-r--r--roles/lib_openshift/library/oc_env.py124
-rw-r--r--roles/lib_openshift/library/oc_group.py124
-rw-r--r--roles/lib_openshift/library/oc_image.py124
-rw-r--r--roles/lib_openshift/library/oc_label.py128
-rw-r--r--roles/lib_openshift/library/oc_obj.py178
-rw-r--r--roles/lib_openshift/library/oc_objectvalidator.py126
-rw-r--r--roles/lib_openshift/library/oc_process.py138
-rw-r--r--roles/lib_openshift/library/oc_project.py124
-rw-r--r--roles/lib_openshift/library/oc_pvc.py124
-rw-r--r--roles/lib_openshift/library/oc_route.py124
-rw-r--r--roles/lib_openshift/library/oc_scale.py124
-rw-r--r--roles/lib_openshift/library/oc_secret.py157
-rw-r--r--roles/lib_openshift/library/oc_service.py198
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount.py124
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount_secret.py124
-rw-r--r--roles/lib_openshift/library/oc_storageclass.py1685
-rw-r--r--roles/lib_openshift/library/oc_user.py124
-rw-r--r--roles/lib_openshift/library/oc_version.py124
-rw-r--r--roles/lib_openshift/library/oc_volume.py136
-rw-r--r--roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py4
-rw-r--r--roles/lib_openshift/src/ansible/oc_adm_registry.py2
-rw-r--r--roles/lib_openshift/src/ansible/oc_adm_router.py2
-rw-r--r--roles/lib_openshift/src/ansible/oc_atomic_container.py21
-rw-r--r--roles/lib_openshift/src/ansible/oc_obj.py2
-rw-r--r--roles/lib_openshift/src/ansible/oc_secret.py1
-rw-r--r--roles/lib_openshift/src/ansible/oc_service.py1
-rw-r--r--roles/lib_openshift/src/ansible/oc_storageclass.py32
-rw-r--r--roles/lib_openshift/src/class/oc_adm_ca_server_cert.py11
-rw-r--r--roles/lib_openshift/src/class/oc_adm_manage_node.py2
-rw-r--r--roles/lib_openshift/src/class/oc_adm_policy_user.py2
-rw-r--r--roles/lib_openshift/src/class/oc_adm_registry.py45
-rw-r--r--roles/lib_openshift/src/class/oc_adm_router.py4
-rw-r--r--roles/lib_openshift/src/class/oc_clusterrole.py6
-rw-r--r--roles/lib_openshift/src/class/oc_configmap.py4
-rw-r--r--roles/lib_openshift/src/class/oc_label.py4
-rw-r--r--roles/lib_openshift/src/class/oc_obj.py46
-rw-r--r--roles/lib_openshift/src/class/oc_objectvalidator.py2
-rw-r--r--roles/lib_openshift/src/class/oc_process.py14
-rw-r--r--roles/lib_openshift/src/class/oc_secret.py21
-rw-r--r--roles/lib_openshift/src/class/oc_service.py5
-rw-r--r--roles/lib_openshift/src/class/oc_storageclass.py155
-rw-r--r--roles/lib_openshift/src/doc/obj6
-rw-r--r--roles/lib_openshift/src/doc/secret6
-rw-r--r--roles/lib_openshift/src/doc/service7
-rw-r--r--roles/lib_openshift/src/doc/storageclass86
-rw-r--r--roles/lib_openshift/src/doc/volume12
-rwxr-xr-xroles/lib_openshift/src/generate.py9
-rw-r--r--roles/lib_openshift/src/lib/base.py116
-rw-r--r--roles/lib_openshift/src/lib/rule.py8
-rw-r--r--roles/lib_openshift/src/lib/secret.py5
-rw-r--r--roles/lib_openshift/src/lib/service.py61
-rw-r--r--roles/lib_openshift/src/lib/storageclass.py73
-rw-r--r--roles/lib_openshift/src/sources.yml11
-rw-r--r--roles/lib_openshift/src/test/integration/filter_plugins/filters.py1
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_label.yml2
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_obj.yml207
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_service.yml5
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_storageclass.yml87
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_user.yml2
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_adm_registry.py15
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_adm_router.py10
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_objectvalidator.py28
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_secret.py4
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_service.py182
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_storageclass.py93
-rw-r--r--roles/lib_utils/library/repoquery.py32
-rw-r--r--roles/lib_utils/library/yedit.py3
-rw-r--r--roles/lib_utils/src/ansible/repoquery.py1
-rw-r--r--roles/lib_utils/src/class/repoquery.py30
-rw-r--r--roles/lib_utils/src/class/yedit.py2
-rw-r--r--roles/lib_utils/src/lib/import.py1
-rwxr-xr-xroles/lib_utils/src/test/unit/test_repoquery.py3
-rw-r--r--roles/nuage_master/defaults/main.yaml4
-rw-r--r--roles/nuage_master/tasks/main.yaml8
-rw-r--r--roles/nuage_master/templates/nuage-openshift-monitor.j210
-rw-r--r--roles/nuage_node/tasks/main.yaml15
-rw-r--r--roles/nuage_node/templates/vsp-openshift.j22
-rw-r--r--roles/nuage_node/vars/main.yaml3
-rw-r--r--roles/openshift_ca/tasks/main.yml55
-rw-r--r--roles/openshift_ca/vars/main.yml3
-rw-r--r--roles/openshift_certificate_expiry/README.md157
-rw-r--r--roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py3
-rw-r--r--roles/openshift_certificate_expiry/library/openshift_cert_expiry.py25
-rw-r--r--roles/openshift_certificate_expiry/tasks/main.yml6
-rw-r--r--roles/openshift_certificate_expiry/test/conftest.py5
-rw-r--r--roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py3
-rw-r--r--roles/openshift_cfme/README.md404
-rw-r--r--roles/openshift_cfme/defaults/main.yml38
-rw-r--r--roles/openshift_cfme/files/miq-template.yaml566
-rw-r--r--roles/openshift_cfme/files/openshift_cfme.exports3
-rw-r--r--roles/openshift_cfme/handlers/main.yml42
-rw-r--r--roles/openshift_cfme/img/CFMEBasicDeployment.pngbin0 -> 38316 bytes
-rw-r--r--roles/openshift_cfme/meta/main.yml20
-rw-r--r--roles/openshift_cfme/tasks/create_pvs.yml36
-rw-r--r--roles/openshift_cfme/tasks/main.yml148
-rw-r--r--roles/openshift_cfme/tasks/tune_masters.yml12
-rw-r--r--roles/openshift_cfme/tasks/uninstall.yml43
-rw-r--r--roles/openshift_cfme/templates/miq-pv-db.yaml.j213
-rw-r--r--roles/openshift_cfme/templates/miq-pv-region.yaml.j213
-rw-r--r--roles/openshift_cfme/templates/miq-pv-server.yaml.j213
-rw-r--r--roles/openshift_cli/library/openshift_container_binary_sync.py2
-rw-r--r--roles/openshift_cloud_provider/tasks/openstack.yml2
-rw-r--r--roles/openshift_common/tasks/main.yml10
-rw-r--r--roles/openshift_default_storage_class/README.md39
-rw-r--r--roles/openshift_default_storage_class/defaults/main.yml19
-rw-r--r--roles/openshift_default_storage_class/meta/main.yml15
-rw-r--r--roles/openshift_default_storage_class/tasks/main.yml9
-rw-r--r--roles/openshift_default_storage_class/vars/main.yml (renamed from roles/openshift_etcd_ca/tasks/main.yml)0
-rw-r--r--roles/openshift_docker_facts/tasks/main.yml4
-rw-r--r--roles/openshift_etcd_facts/vars/main.yml3
-rwxr-xr-xroles/openshift_examples/examples-sync.sh15
l---------roles/openshift_examples/files/examples/latest2
-rw-r--r--roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml6
-rw-r--r--roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml58
-rw-r--r--roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml254
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json15
-rw-r--r--roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json67
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md1
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/amp.yml1261
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast.yml157
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json7
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json7
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json9
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json9
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/dotnet-example.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json)2
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/dotnet-pgsql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json)25
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/httpd.json274
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json15
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json15
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/pvc.yml49
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json13
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json13
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/wildcard.yml158
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json535
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-basic.json21
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent-ssl.json30
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-ssl.json26
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-basic.json334
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent-ssl.json569
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent.json386
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-ssl.json521
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-basic.json50
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-https.json59
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql-persistent.json93
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql.json92
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql-persistent.json84
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql.json83
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json73
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json63
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json308
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-amq-s2i.json40
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-basic-s2i.json20
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-https-s2i.json27
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-amq-s2i.json66
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-basic-s2i.json45
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-https-s2i.json52
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-amq-s2i.json748
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-basic-s2i.json376
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-https-s2i.json517
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-s2i.json71
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-basic-s2i.json44
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-https-s2i.json55
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-persistent-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-s2i.json85
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-persistent-s2i.json87
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-persistent-s2i.json84
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-s2i.json83
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-sso-s2i.json72
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-third-party-db-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json)350
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-s2i.json71
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-basic-s2i.json44
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-https-s2i.json55
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-persistent-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-s2i.json85
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-persistent-s2i.json87
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-persistent-s2i.json84
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-s2i.json83
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-sso-s2i.json74
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-third-party-db-s2i.json657
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-basic-s2i.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-https-s2i.json48
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-s2i.json76
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-s2i.json74
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-basic-s2i.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-https-s2i.json48
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-s2i.json76
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-s2i.json74
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-basic-s2i.json319
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-https-s2i.json438
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json715
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-s2i.json674
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json718
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-s2i.json677
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json692
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-s2i.json651
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-basic-s2i.json319
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-https-s2i.json438
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json715
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-s2i.json674
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json718
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-s2i.json677
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json692
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-s2i.json649
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json7
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json93
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-s2i.json91
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json90
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-s2i.json88
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-basic-s2i.json46
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-persistent-s2i.json82
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-s2i.json81
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-persistent-s2i.json79
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-s2i.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json1156
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-s2i.json1034
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json1126
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-s2i.json1004
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-basic-s2i.json383
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-persistent-s2i.json860
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-s2i.json783
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-persistent-s2i.json830
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-s2i.json753
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-https.json50
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql-persistent.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql-persistent.json72
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql.json71
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-https.json544
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql-persistent.json799
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql.json767
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql-persistent.json773
-rw-r--r--roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql.json741
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml210
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml58
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml254
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json12
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json67
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md1
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/amp.yml1261
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast.yml157
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json7
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json7
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json9
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json9
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json2
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/httpd.json274
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json5
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json5
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json11
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/pvc.yml49
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/wildcard.yml158
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json535
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json21
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json30
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json26
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-basic.json334
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent-ssl.json569
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent.json386
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-ssl.json521
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json50
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json59
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json93
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json92
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json84
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json83
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json73
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json63
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json308
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json40
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json20
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json27
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json66
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json45
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json52
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-amq-s2i.json748
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-basic-s2i.json376
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-https-s2i.json517
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json71
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json44
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json55
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json85
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json87
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json84
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json83
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json72
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-third-party-db-s2i.json646
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json71
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json44
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json55
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json85
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json87
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json86
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json84
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json83
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json74
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-third-party-db-s2i.json657
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json48
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json76
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json74
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json48
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json76
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json77
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json74
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-basic-s2i.json319
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-https-s2i.json438
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json715
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-s2i.json674
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json718
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-s2i.json677
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json692
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-s2i.json651
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-basic-s2i.json319
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-https-s2i.json438
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json715
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-s2i.json674
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json718
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-s2i.json677
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json692
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-s2i.json649
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json7
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json93
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json91
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json90
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json88
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json46
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json82
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json81
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json79
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json1156
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-s2i.json1034
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json1126
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-s2i.json1004
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-basic-s2i.json383
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-persistent-s2i.json860
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-s2i.json783
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-persistent-s2i.json830
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-s2i.json753
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json50
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json75
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json78
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json72
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json71
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-https.json544
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql-persistent.json799
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql.json767
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql-persistent.json773
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql.json741
-rw-r--r--roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json397
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml)210
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml58
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml254
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/README.md (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/README.md)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json)12
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json)12
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json)12
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json)12
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json)12
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json)12
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json)11
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json)11
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json)10
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json)10
-rw-r--r--roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json)75
-rw-r--r--roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json)67
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md)1
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml1261
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml157
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json)7
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json)7
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json)10
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json)10
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json)9
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json)9
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json333
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json565
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json274
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json)5
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json)5
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json)11
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json)11
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml49
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json)13
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json)13
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml158
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json822
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json)21
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json)30
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json)25
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json)26
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-basic.json334
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent-ssl.json569
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent.json386
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-ssl.json521
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json)50
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json)59
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json)93
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json)92
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json)84
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json)83
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json)73
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json)63
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json940
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json)40
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json)20
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json)27
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json)66
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json)45
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json)52
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-amq-s2i.json748
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-basic-s2i.json376
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-https-s2i.json517
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json)75
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json)71
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json)44
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json)55
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json)86
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json)85
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json)87
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json)86
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json)84
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json)83
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json)72
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-third-party-db-s2i.json646
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json)136
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json)134
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json)44
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json)55
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json)86
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json)85
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json)87
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json)86
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json)84
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json)83
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json)74
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-third-party-db-s2i.json657
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json)43
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json)48
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json)77
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json)76
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json)78
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json)77
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json)75
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json)74
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json)43
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json)48
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json)77
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json)76
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json)78
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json)77
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json)75
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json)74
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-basic-s2i.json319
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-https-s2i.json438
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json715
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-s2i.json674
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json718
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-s2i.json677
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json692
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-s2i.json651
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-basic-s2i.json319
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-https-s2i.json438
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json715
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-s2i.json674
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json718
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-s2i.json677
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json692
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-s2i.json649
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json)7
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json)93
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json)91
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json)90
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json)88
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json)46
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json)82
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json)81
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json)79
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json)78
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json1156
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-s2i.json1034
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json1126
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-s2i.json1004
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-basic-s2i.json383
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-persistent-s2i.json860
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-s2i.json783
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-persistent-s2i.json830
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-s2i.json753
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json)50
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json)75
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json)78
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json)72
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json)71
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-https.json544
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql-persistent.json799
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql.json767
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql-persistent.json773
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql.json741
-rw-r--r--roles/openshift_examples/tasks/main.yml16
-rw-r--r--roles/openshift_excluder/README.md62
-rw-r--r--roles/openshift_excluder/defaults/main.yml19
-rw-r--r--roles/openshift_excluder/meta/main.yml5
-rw-r--r--roles/openshift_excluder/tasks/disable.yml65
-rw-r--r--roles/openshift_excluder/tasks/enable.yml20
-rw-r--r--roles/openshift_excluder/tasks/exclude.yml42
-rw-r--r--roles/openshift_excluder/tasks/init.yml12
-rw-r--r--roles/openshift_excluder/tasks/install.yml27
-rw-r--r--roles/openshift_excluder/tasks/main.yml38
-rw-r--r--roles/openshift_excluder/tasks/unexclude.yml38
-rw-r--r--roles/openshift_excluder/tasks/verify_excluder.yml32
-rw-r--r--roles/openshift_excluder/tasks/verify_upgrade.yml12
-rw-r--r--roles/openshift_expand_partition/tasks/main.yml2
-rw-r--r--roles/openshift_facts/defaults/main.yml2
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py101
-rw-r--r--roles/openshift_facts/tasks/main.yml35
-rw-r--r--roles/openshift_health_checker/action_plugins/openshift_health_check.py97
-rw-r--r--roles/openshift_health_checker/callback_plugins/zz_failure_summary.py113
-rw-r--r--[-rwxr-xr-x]roles/openshift_health_checker/library/aos_version.py242
-rw-r--r--[-rwxr-xr-x]roles/openshift_health_checker/library/check_yum_update.py1
-rw-r--r--roles/openshift_health_checker/library/docker_info.py2
-rw-r--r--roles/openshift_health_checker/library/etcdkeysize.py122
-rw-r--r--roles/openshift_health_checker/library/ocutil.py74
-rw-r--r--roles/openshift_health_checker/library/rpm_version.py127
-rw-r--r--roles/openshift_health_checker/library/search_journalctl.py150
-rw-r--r--roles/openshift_health_checker/meta/main.yml2
-rw-r--r--roles/openshift_health_checker/openshift_checks/__init__.py98
-rw-r--r--roles/openshift_health_checker/openshift_checks/disk_availability.py114
-rw-r--r--roles/openshift_health_checker/openshift_checks/docker_image_availability.py257
-rw-r--r--roles/openshift_health_checker/openshift_checks/docker_storage.py298
-rw-r--r--roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py85
-rw-r--r--roles/openshift_health_checker/openshift_checks/etcd_traffic.py44
-rw-r--r--roles/openshift_health_checker/openshift_checks/etcd_volume.py55
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/__init__.py0
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/curator.py54
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py210
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/fluentd.py167
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/kibana.py226
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/logging.py92
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py130
-rw-r--r--roles/openshift_health_checker/openshift_checks/memory_availability.py50
-rw-r--r--roles/openshift_health_checker/openshift_checks/mixins.py59
-rw-r--r--roles/openshift_health_checker/openshift_checks/ovs_version.py77
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_availability.py22
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_update.py8
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_version.py122
-rw-r--r--roles/openshift_health_checker/test/action_plugin_test.py252
-rw-r--r--roles/openshift_health_checker/test/aos_version_test.py196
-rw-r--r--roles/openshift_health_checker/test/conftest.py10
-rw-r--r--roles/openshift_health_checker/test/curator_test.py68
-rw-r--r--roles/openshift_health_checker/test/disk_availability_test.py177
-rw-r--r--roles/openshift_health_checker/test/docker_image_availability_test.py278
-rw-r--r--roles/openshift_health_checker/test/docker_storage_test.py305
-rw-r--r--roles/openshift_health_checker/test/elasticsearch_test.py180
-rw-r--r--roles/openshift_health_checker/test/etcd_imagedata_size_test.py328
-rw-r--r--roles/openshift_health_checker/test/etcd_traffic_test.py74
-rw-r--r--roles/openshift_health_checker/test/etcd_volume_test.py146
-rw-r--r--roles/openshift_health_checker/test/fluentd_test.py109
-rw-r--r--roles/openshift_health_checker/test/kibana_test.py218
-rw-r--r--roles/openshift_health_checker/test/logging_check_test.py165
-rw-r--r--roles/openshift_health_checker/test/logging_index_time_test.py170
-rw-r--r--roles/openshift_health_checker/test/memory_availability_test.py127
-rw-r--r--roles/openshift_health_checker/test/mixins_test.py4
-rw-r--r--roles/openshift_health_checker/test/openshift_check_test.py43
-rw-r--r--roles/openshift_health_checker/test/ovs_version_test.py86
-rw-r--r--roles/openshift_health_checker/test/package_availability_test.py19
-rw-r--r--roles/openshift_health_checker/test/package_update_test.py5
-rw-r--r--roles/openshift_health_checker/test/package_version_test.py123
-rw-r--r--roles/openshift_health_checker/test/rpm_version_test.py82
-rw-r--r--roles/openshift_health_checker/test/search_journalctl_test.py157
-rw-r--r--roles/openshift_hosted/README.md8
-rw-r--r--roles/openshift_hosted/defaults/main.yml10
-rw-r--r--roles/openshift_hosted/meta/main.yml5
-rw-r--r--roles/openshift_hosted/tasks/registry/registry.yml42
-rw-r--r--roles/openshift_hosted/tasks/registry/secure.yml3
-rw-r--r--roles/openshift_hosted/tasks/registry/storage/glusterfs.yml92
-rw-r--r--roles/openshift_hosted/tasks/registry/storage/object_storage.yml16
-rw-r--r--roles/openshift_hosted/tasks/registry/storage/s3.yml4
-rw-r--r--roles/openshift_hosted/tasks/router/router.yml79
-rw-r--r--roles/openshift_hosted/templates/registry_config.j29
-rw-r--r--roles/openshift_hosted_logging/README.md2
-rw-r--r--roles/openshift_hosted_logging/tasks/deploy_logging.yaml12
-rw-r--r--roles/openshift_hosted_logging/vars/main.yaml3
-rw-r--r--roles/openshift_hosted_metrics/tasks/install.yml2
-rw-r--r--roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml2
-rw-r--r--roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml4
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml345
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml342
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml345
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml342
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml (renamed from roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml)4
-rw-r--r--roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml (renamed from roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml)0
-rw-r--r--roles/openshift_loadbalancer/README.md2
-rw-r--r--roles/openshift_loadbalancer/templates/haproxy.docker.service.j28
-rw-r--r--roles/openshift_logging/README.md67
-rw-r--r--roles/openshift_logging/defaults/main.yml69
-rw-r--r--roles/openshift_logging/files/logging-deployer-sa.yaml6
-rw-r--r--roles/openshift_logging/filter_plugins/openshift_logging.py2
-rw-r--r--roles/openshift_logging/handlers/main.yml9
-rw-r--r--roles/openshift_logging/library/openshift_logging_facts.py4
-rw-r--r--roles/openshift_logging/tasks/delete_logging.yaml122
-rw-r--r--roles/openshift_logging/tasks/generate_certs.yaml51
-rw-r--r--roles/openshift_logging/tasks/generate_clusterrolebindings.yaml13
-rw-r--r--roles/openshift_logging/tasks/generate_clusterroles.yaml11
-rw-r--r--roles/openshift_logging/tasks/generate_configmaps.yaml136
-rw-r--r--roles/openshift_logging/tasks/generate_deploymentconfigs.yaml65
-rw-r--r--roles/openshift_logging/tasks/generate_pems.yaml3
-rw-r--r--roles/openshift_logging/tasks/generate_pvcs.yaml47
-rw-r--r--roles/openshift_logging/tasks/generate_rolebindings.yaml12
-rw-r--r--roles/openshift_logging/tasks/generate_routes.yaml77
-rw-r--r--roles/openshift_logging/tasks/generate_secrets.yaml71
-rw-r--r--roles/openshift_logging/tasks/generate_serviceaccounts.yaml14
-rw-r--r--roles/openshift_logging/tasks/generate_services.yaml87
-rw-r--r--roles/openshift_logging/tasks/install_curator.yaml53
-rw-r--r--roles/openshift_logging/tasks/install_elasticsearch.yaml146
-rw-r--r--roles/openshift_logging/tasks/install_fluentd.yaml54
-rw-r--r--roles/openshift_logging/tasks/install_kibana.yaml60
-rw-r--r--roles/openshift_logging/tasks/install_logging.yaml338
-rw-r--r--roles/openshift_logging/tasks/install_support.yaml54
-rw-r--r--roles/openshift_logging/tasks/main.yaml24
-rw-r--r--roles/openshift_logging/tasks/oc_secret.yaml7
-rw-r--r--roles/openshift_logging/tasks/procure_server_certs.yaml38
-rw-r--r--roles/openshift_logging/tasks/procure_shared_key.yaml25
-rw-r--r--roles/openshift_logging/tasks/start_cluster.yaml133
-rw-r--r--roles/openshift_logging/tasks/stop_cluster.yaml133
-rw-r--r--roles/openshift_logging/tasks/update_master_config.yaml7
-rw-r--r--roles/openshift_logging/tasks/upgrade_logging.yaml48
-rw-r--r--roles/openshift_logging/templates/clusterrole.j221
-rw-r--r--roles/openshift_logging/templates/es-storage-emptydir.partial1
-rw-r--r--roles/openshift_logging/templates/es-storage-hostpath.partial2
-rw-r--r--roles/openshift_logging/templates/es-storage-pvc.partial2
-rw-r--r--roles/openshift_logging/templates/fluentd.j2149
-rw-r--r--roles/openshift_logging/templates/kibana.j2116
-rw-r--r--roles/openshift_logging/templates/secret.j29
-rw-r--r--roles/openshift_logging/templates/service.j228
-rw-r--r--roles/openshift_logging/vars/main.yaml12
-rw-r--r--roles/openshift_logging/vars/openshift-enterprise.yml2
-rw-r--r--roles/openshift_logging_curator/defaults/main.yml33
-rw-r--r--roles/openshift_logging_curator/files/curator.yml (renamed from roles/openshift_logging/files/curator.yml)0
-rw-r--r--roles/openshift_logging_curator/meta/main.yaml15
-rw-r--r--roles/openshift_logging_curator/tasks/determine_version.yaml17
-rw-r--r--roles/openshift_logging_curator/tasks/main.yaml113
-rw-r--r--roles/openshift_logging_curator/templates/curator.j2 (renamed from roles/openshift_logging/templates/curator.j2)11
-rw-r--r--roles/openshift_logging_curator/vars/main.yml3
-rw-r--r--roles/openshift_logging_elasticsearch/defaults/main.yml57
-rw-r--r--roles/openshift_logging_elasticsearch/files/es_migration.sh (renamed from roles/openshift_logging/files/es_migration.sh)0
-rw-r--r--roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml9
-rw-r--r--roles/openshift_logging_elasticsearch/meta/main.yaml15
-rw-r--r--roles/openshift_logging_elasticsearch/tasks/determine_version.yaml19
-rw-r--r--roles/openshift_logging_elasticsearch/tasks/main.yaml347
-rw-r--r--roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 (renamed from roles/openshift_logging/templates/elasticsearch-logging.yml.j2)47
-rw-r--r--roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 (renamed from roles/openshift_logging/templates/elasticsearch.yml.j2)14
-rw-r--r--roles/openshift_logging_elasticsearch/templates/es.j2 (renamed from roles/openshift_logging/templates/es.j2)35
-rw-r--r--roles/openshift_logging_elasticsearch/templates/pvc.j230
-rw-r--r--roles/openshift_logging_elasticsearch/templates/rolebinding.j2 (renamed from roles/openshift_logging/templates/rolebinding.j2)0
-rw-r--r--roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 (renamed from roles/openshift_logging/templates/route_reencrypt.j2)0
-rw-r--r--roles/openshift_logging_elasticsearch/vars/main.yml12
-rw-r--r--roles/openshift_logging_fluentd/defaults/main.yml59
-rw-r--r--roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml (renamed from roles/openshift_logging/files/fluentd-throttle-config.yaml)0
-rw-r--r--roles/openshift_logging_fluentd/files/secure-forward.conf (renamed from roles/openshift_logging/files/secure-forward.conf)0
-rw-r--r--roles/openshift_logging_fluentd/meta/main.yaml15
-rw-r--r--roles/openshift_logging_fluentd/tasks/determine_version.yaml17
-rw-r--r--roles/openshift_logging_fluentd/tasks/label_and_wait.yaml10
-rw-r--r--roles/openshift_logging_fluentd/tasks/main.yaml202
-rw-r--r--roles/openshift_logging_fluentd/templates/fluent.conf.j278
-rw-r--r--roles/openshift_logging_fluentd/templates/fluentd.j2147
-rw-r--r--roles/openshift_logging_fluentd/vars/main.yml4
-rw-r--r--roles/openshift_logging_kibana/defaults/main.yml41
-rw-r--r--roles/openshift_logging_kibana/meta/main.yaml15
-rw-r--r--roles/openshift_logging_kibana/tasks/determine_version.yaml17
-rw-r--r--roles/openshift_logging_kibana/tasks/main.yaml255
-rw-r--r--roles/openshift_logging_kibana/templates/kibana.j2150
-rw-r--r--roles/openshift_logging_kibana/templates/oauth-client.j2 (renamed from roles/openshift_logging/templates/oauth-client.j2)7
-rw-r--r--roles/openshift_logging_kibana/templates/route_reencrypt.j236
-rw-r--r--roles/openshift_logging_kibana/vars/main.yml3
-rw-r--r--roles/openshift_logging_mux/defaults/main.yml49
-rw-r--r--roles/openshift_logging_mux/files/fluent.conf (renamed from roles/openshift_logging/files/fluent.conf)0
-rw-r--r--roles/openshift_logging_mux/files/secure-forward.conf24
-rw-r--r--roles/openshift_logging_mux/meta/main.yaml15
-rw-r--r--roles/openshift_logging_mux/tasks/determine_version.yaml17
-rw-r--r--roles/openshift_logging_mux/tasks/main.yaml201
-rw-r--r--roles/openshift_logging_mux/templates/mux.j2133
-rw-r--r--roles/openshift_logging_mux/vars/main.yml3
-rw-r--r--roles/openshift_manageiq/tasks/main.yaml88
-rw-r--r--roles/openshift_manageiq/vars/main.yml64
-rw-r--r--roles/openshift_master/README.md24
-rw-r--r--roles/openshift_master/defaults/main.yml4
-rw-r--r--roles/openshift_master/files/atomic-openshift-master.service23
-rw-r--r--roles/openshift_master/files/origin-master.service23
l---------roles/openshift_master/tasks/files1
-rw-r--r--roles/openshift_master/tasks/main.yml41
-rw-r--r--roles/openshift_master/tasks/systemd_units.yml10
-rw-r--r--roles/openshift_master/templates/atomic-openshift-master.j23
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j210
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j210
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j217
-rw-r--r--roles/openshift_master/templates/master_docker/master.docker.service.j210
-rw-r--r--roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j23
-rw-r--r--roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j23
-rw-r--r--roles/openshift_master_certificates/tasks/main.yml21
-rw-r--r--roles/openshift_master_facts/defaults/main.yml22
-rw-r--r--roles/openshift_master_facts/filter_plugins/openshift_master.py22
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py4
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py2
-rw-r--r--roles/openshift_master_facts/tasks/main.yml8
-rw-r--r--roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py7
-rw-r--r--roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py7
-rw-r--r--roles/openshift_metrics/README.md5
-rw-r--r--roles/openshift_metrics/defaults/main.yaml9
-rwxr-xr-xroles/openshift_metrics/files/import_jks_certs.sh52
-rw-r--r--roles/openshift_metrics/handlers/main.yml9
-rw-r--r--roles/openshift_metrics/tasks/generate_certificates.yaml2
-rw-r--r--roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml85
-rw-r--r--roles/openshift_metrics/tasks/generate_heapster_certificates.yaml40
-rw-r--r--roles/openshift_metrics/tasks/generate_heapster_secrets.yaml14
-rw-r--r--roles/openshift_metrics/tasks/generate_rolebindings.yaml24
-rw-r--r--roles/openshift_metrics/tasks/import_jks_certs.yaml37
-rw-r--r--roles/openshift_metrics/tasks/install_cassandra.yaml5
-rw-r--r--roles/openshift_metrics/tasks/install_heapster.yaml8
-rw-r--r--roles/openshift_metrics/tasks/install_hosa.yaml44
-rw-r--r--roles/openshift_metrics/tasks/install_metrics.yaml43
-rw-r--r--roles/openshift_metrics/tasks/install_support.yaml2
-rw-r--r--roles/openshift_metrics/tasks/main.yaml16
-rw-r--r--roles/openshift_metrics/tasks/oc_apply.yaml2
-rw-r--r--roles/openshift_metrics/tasks/setup_certificate.yaml2
-rw-r--r--roles/openshift_metrics/tasks/start_metrics.yaml4
-rw-r--r--roles/openshift_metrics/tasks/stop_metrics.yaml4
-rw-r--r--roles/openshift_metrics/tasks/uninstall_hosa.yaml15
-rw-r--r--roles/openshift_metrics/tasks/uninstall_metrics.yaml7
-rw-r--r--roles/openshift_metrics/tasks/update_master_config.yaml7
-rw-r--r--roles/openshift_metrics/templates/hawkular_cassandra_rc.j21
-rw-r--r--roles/openshift_metrics/templates/hawkular_metrics_rc.j225
-rw-r--r--roles/openshift_metrics/templates/hawkular_metrics_role.j215
-rw-r--r--roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j254
-rw-r--r--roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j291
-rw-r--r--roles/openshift_metrics/templates/hawkular_openshift_agent_role.j225
-rw-r--r--roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j27
-rw-r--r--roles/openshift_metrics/templates/heapster.j230
-rw-r--r--roles/openshift_metrics/templates/pvc.j210
-rw-r--r--roles/openshift_metrics/templates/route.j22
-rw-r--r--roles/openshift_metrics/templates/service.j26
-rw-r--r--roles/openshift_metrics/vars/openshift-enterprise.yml2
-rw-r--r--roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py21
-rw-r--r--roles/openshift_node/README.md9
-rw-r--r--roles/openshift_node/defaults/main.yml9
-rw-r--r--roles/openshift_node/handlers/main.yml29
-rw-r--r--roles/openshift_node/meta/main.yml12
-rw-r--r--roles/openshift_node/tasks/main.yml69
-rw-r--r--roles/openshift_node/tasks/openvswitch_system_container.yml2
-rw-r--r--roles/openshift_node/tasks/storage_plugins/glusterfs.yml60
-rw-r--r--roles/openshift_node/tasks/storage_plugins/nfs.yml42
-rw-r--r--roles/openshift_node/tasks/systemd_units.yml25
-rw-r--r--roles/openshift_node/templates/node.service.j231
-rw-r--r--roles/openshift_node/templates/node.yaml.v1.j24
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.dep.service4
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.service18
-rw-r--r--roles/openshift_node/templates/openvswitch.docker.service8
-rw-r--r--roles/openshift_node_certificates/handlers/main.yml6
-rw-r--r--roles/openshift_node_certificates/tasks/main.yml5
-rw-r--r--roles/openshift_node_dnsmasq/defaults/main.yml1
-rwxr-xr-xroles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh34
-rw-r--r--roles/openshift_node_dnsmasq/tasks/main.yml11
-rw-r--r--roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j22
-rw-r--r--roles/openshift_node_dnsmasq/templates/origin-dns.conf.j26
-rw-r--r--roles/openshift_node_upgrade/README.md6
-rw-r--r--roles/openshift_node_upgrade/defaults/main.yml1
-rw-r--r--roles/openshift_node_upgrade/handlers/main.yml25
-rw-r--r--roles/openshift_node_upgrade/meta/main.yml1
-rw-r--r--roles/openshift_node_upgrade/tasks/docker/upgrade.yml25
-rw-r--r--roles/openshift_node_upgrade/tasks/main.yml114
-rw-r--r--roles/openshift_node_upgrade/tasks/restart.yml (renamed from roles/openshift_node_upgrade/tasks/docker/restart.yml)20
-rw-r--r--roles/openshift_node_upgrade/tasks/rpm_upgrade.yml15
-rw-r--r--roles/openshift_node_upgrade/tasks/systemd_units.yml15
-rw-r--r--roles/openshift_node_upgrade/templates/node.service.j231
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service4
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.service20
-rw-r--r--roles/openshift_node_upgrade/templates/openvswitch.docker.service8
-rw-r--r--roles/openshift_persistent_volumes/templates/persistent-volume.yml.j26
-rw-r--r--roles/openshift_provisioners/README.md29
-rw-r--r--roles/openshift_provisioners/defaults/main.yaml12
-rw-r--r--roles/openshift_provisioners/meta/main.yaml16
-rw-r--r--roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml19
-rw-r--r--roles/openshift_provisioners/tasks/generate_secrets.yaml14
-rw-r--r--roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml12
-rw-r--r--roles/openshift_provisioners/tasks/install_efs.yaml70
-rw-r--r--roles/openshift_provisioners/tasks/install_provisioners.yaml55
-rw-r--r--roles/openshift_provisioners/tasks/install_support.yaml24
-rw-r--r--roles/openshift_provisioners/tasks/main.yaml27
-rw-r--r--roles/openshift_provisioners/tasks/oc_apply.yaml (renamed from roles/openshift_logging/tasks/oc_apply.yaml)7
-rw-r--r--roles/openshift_provisioners/tasks/start_cluster.yaml20
-rw-r--r--roles/openshift_provisioners/tasks/stop_cluster.yaml20
-rw-r--r--roles/openshift_provisioners/tasks/uninstall_provisioners.yaml43
-rw-r--r--roles/openshift_provisioners/templates/clusterrolebinding.j2 (renamed from roles/openshift_logging/templates/clusterrolebinding.j2)8
-rw-r--r--roles/openshift_provisioners/templates/efs.j258
-rw-r--r--roles/openshift_provisioners/templates/pv.j232
-rw-r--r--roles/openshift_provisioners/templates/pvc.j2 (renamed from roles/openshift_logging/templates/pvc.j2)7
-rw-r--r--roles/openshift_provisioners/templates/secret.j215
-rw-r--r--roles/openshift_provisioners/templates/serviceaccount.j2 (renamed from roles/openshift_logging/templates/serviceaccount.j2)0
-rw-r--r--roles/openshift_repos/README.md8
-rw-r--r--roles/openshift_repos/defaults/main.yaml1
-rw-r--r--roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo2
-rw-r--r--roles/openshift_repos/tasks/main.yaml59
-rw-r--r--roles/openshift_sanitize_inventory/tasks/main.yml38
-rw-r--r--roles/openshift_sanitize_inventory/tasks/unsupported.yml12
-rw-r--r--roles/openshift_service_catalog/defaults/main.yml3
-rw-r--r--roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml199
-rw-r--r--roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml38
-rw-r--r--roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js2
-rw-r--r--roles/openshift_service_catalog/meta/main.yml17
-rw-r--r--roles/openshift_service_catalog/tasks/generate_certs.yml70
-rw-r--r--roles/openshift_service_catalog/tasks/install.yml239
-rw-r--r--roles/openshift_service_catalog/tasks/main.yml8
-rw-r--r--roles/openshift_service_catalog/tasks/remove.yml56
-rw-r--r--roles/openshift_service_catalog/tasks/start_api_server.yml22
-rw-r--r--roles/openshift_service_catalog/tasks/wire_aggregator.yml206
-rw-r--r--roles/openshift_service_catalog/templates/api_server.j279
-rw-r--r--roles/openshift_service_catalog/templates/api_server_route.j214
-rw-r--r--roles/openshift_service_catalog/templates/api_server_service.j213
-rw-r--r--roles/openshift_service_catalog/templates/controller_manager.j247
-rw-r--r--roles/openshift_service_catalog/templates/controller_manager_service.j213
-rw-r--r--roles/openshift_service_catalog/templates/sc_role_patching.j226
-rw-r--r--roles/openshift_service_catalog/vars/default_images.yml3
-rw-r--r--roles/openshift_service_catalog/vars/openshift-enterprise.yml3
-rw-r--r--roles/openshift_storage_glusterfs/README.md156
-rw-r--r--roles/openshift_storage_glusterfs/defaults/main.yml54
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml143
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml136
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml134
-rw-r--r--roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py23
-rw-r--r--roles/openshift_storage_glusterfs/meta/main.yml15
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml253
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml31
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml113
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml81
-rw-r--r--roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml56
-rw-r--r--roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml133
-rw-r--r--roles/openshift_storage_glusterfs/tasks/main.yml21
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j212
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j210
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j213
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j212
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j210
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j236
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j249
-rw-r--r--roles/openshift_storage_nfs/tasks/main.yml2
-rw-r--r--roles/openshift_storage_nfs/templates/exports.j21
-rw-r--r--roles/openshift_version/meta/main.yml1
-rw-r--r--roles/openshift_version/tasks/main.yml235
-rw-r--r--roles/openshift_version/tasks/set_version_containerized.yml21
-rw-r--r--roles/openshift_version/tasks/set_version_rpm.yml54
-rw-r--r--roles/os_firewall/README.md2
-rw-r--r--roles/os_firewall/defaults/main.yml2
-rwxr-xr-xroles/os_firewall/library/os_firewall_manage_iptables.py1
-rw-r--r--roles/os_firewall/tasks/firewall/firewalld.yml8
-rw-r--r--roles/os_firewall/tasks/firewall/iptables.yml2
-rw-r--r--roles/rhel_subscribe/meta/main.yml3
-rw-r--r--roles/rhel_subscribe/tasks/enterprise.yml9
-rw-r--r--roles/rhel_subscribe/tasks/main.yml10
-rw-r--r--setup.py48
-rw-r--r--test-requirements.txt23
-rw-r--r--test/integration/README.md39
-rwxr-xr-xtest/integration/build-images.sh101
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile32
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo5
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo5
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo5
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec33
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec32
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec57
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec57
-rw-r--r--test/integration/openshift_health_checker/builds/test-target-base/Dockerfile2
-rw-r--r--test/integration/openshift_health_checker/common.go99
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml32
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml20
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml31
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml38
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml28
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml34
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml32
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml35
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml35
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml20
l---------test/integration/openshift_health_checker/preflight/playbooks/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_5/roles)0
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml9
-rw-r--r--test/integration/openshift_health_checker/preflight/preflight_test.go105
-rw-r--r--test/integration/openshift_health_checker/setup_container.yml48
-rw-r--r--test/integration/openshift_health_checker/teardown_container.yml24
-rwxr-xr-xtest/integration/run-tests.sh80
-rw-r--r--test/openshift_version_tests.py2
-rw-r--r--test/unit/modify_yaml_tests.py (renamed from test/modify_yaml_tests.py)2
-rw-r--r--tox.ini7
-rw-r--r--utils/etc/ansible.cfg6
-rw-r--r--utils/src/ooinstall/ansible_plugins/facts_callback.py14
-rw-r--r--utils/src/ooinstall/cli_installer.py6
-rw-r--r--utils/src/ooinstall/variants.py7
1323 files changed, 133792 insertions, 13237 deletions
diff --git a/.dockerignore b/.dockerignore
index 968811df5..0a70c5bfa 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,8 +1,12 @@
.*
bin
docs
+hack
+inventory
test
utils
**/*.md
*.spec
+*.ini
+*.txt
setup*
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index cdfd93725..2a4f80a36 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,16 +1,3 @@
-### <HTPASSWD_AUTH>
-
-We are aware of the current issues related to htpasswd_auth failures
-Please downgrade to ansible 2.2.0.0 until a fix is released.
-You can track the status of the bug fix in this issue:
-https://github.com/openshift/openshift-ansible/issues/3111
-Please erase this <HTPASSWD_AUTH> section if it does not apply to you.
-
-Thanks - 2017-01-31
-
-### </HTPASSWD_AUTH>
-
-
#### Description
Provide a brief description of your issue here. For example:
diff --git a/.papr.inventory b/.papr.inventory
new file mode 100644
index 000000000..878d434e2
--- /dev/null
+++ b/.papr.inventory
@@ -0,0 +1,24 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+ansible_ssh_user=root
+ansible_python_interpreter=/usr/bin/python3
+deployment_type=origin
+openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
+openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
+openshift_check_min_host_disk_gb=1.5
+openshift_check_min_host_memory_gb=1.9
+
+[masters]
+ocp-master
+
+[etcd]
+ocp-master
+
+[nodes]
+ocp-master openshift_schedulable=false
+ocp-node1 openshift_node_labels="{'region':'infra'}"
+ocp-node2 openshift_node_labels="{'region':'infra'}"
diff --git a/.papr.sh b/.papr.sh
new file mode 100755
index 000000000..decca625f
--- /dev/null
+++ b/.papr.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -xeuo pipefail
+
+echo "Targeting OpenShift Origin $OPENSHIFT_IMAGE_TAG"
+
+pip install -r requirements.txt
+
+# ping the nodes to check they're responding and register their ostree versions
+ansible -vvv -i .papr.inventory nodes -a 'rpm-ostree status'
+
+upload_journals() {
+ mkdir journals
+ for node in master node1 node2; do
+ ssh ocp-$node 'journalctl --no-pager || true' > journals/ocp-$node.log
+ done
+}
+
+trap upload_journals ERR
+
+# run the actual installer
+# FIXME: override openshift_image_tag defined in the inventory until
+# https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
+ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
+
+# run a small subset of origin conformance tests to sanity-check the cluster.
+# NB: we run it on the master since we may be in a different OSP network
+ssh ocp-master docker run --rm --net=host --privileged \
+ -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c \
+ '"dnf install -y origin-tests && \
+ KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 \
+ --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""'
diff --git a/.papr.yml b/.papr.yml
new file mode 100644
index 000000000..16d6e78b1
--- /dev/null
+++ b/.papr.yml
@@ -0,0 +1,42 @@
+---
+
+# This YAML file is used by PAPR. It details the test
+# environment to provision and the test procedure. For more
+# information on PAPR, see:
+#
+# https://github.com/projectatomic/papr
+#
+# The PAPR YAML specification detailing allowed fields can
+# be found at:
+#
+# https://github.com/projectatomic/papr/blob/master/sample.papr.yml
+
+cluster:
+ hosts:
+ - name: ocp-master
+ distro: fedora/25/atomic
+ - name: ocp-node1
+ distro: fedora/25/atomic
+ - name: ocp-node2
+ distro: fedora/25/atomic
+ container:
+ image: fedora:25
+
+packages:
+ - gcc
+ - python-pip
+ - python-devel
+ - libffi-devel
+ - openssl-devel
+ - redhat-rpm-config
+
+context: 'fedora/25/atomic'
+
+env:
+ OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
+
+tests:
+ - ./.papr.sh
+
+artifacts:
+ - journals/
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index d42684482..a667c3f2d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.14-1 ./
+3.7.1-1 ./
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index b52e4fd87..17baaa1bd 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -37,6 +37,12 @@ releaser = tito.release.DistGitReleaser
branches = rhaos-3.6-rhel-7
srpm_disttag = .el7aos
+[aos-3.7]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.7-rhel-7
+srpm_disttag = .el7aos
+
+
[copr-openshift-ansible]
releaser = tito.release.CoprReleaser
project_name = @OpenShiftOnlineOps/openshift-ansible
diff --git a/.travis.yml b/.travis.yml
index 245202139..1c549cec9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,6 +13,7 @@ python:
- "3.5"
install:
+ - pip install --upgrade pip
- pip install tox-travis coveralls
script:
diff --git a/BUILD.md b/BUILD.md
index 15d3d6240..1c270db23 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -1,10 +1,11 @@
-# openshift-ansible RPM Build instructions
+# openshift-ansible build instructions
+
+## Build openshift-ansible RPMs
+
We use tito to make building and tracking revisions easy.
For more information on tito, please see the [Tito home page](https://github.com/dgoodwin/tito "Tito home page").
-
-## Build openshift-ansible-bin
- Change into openshift-ansible
```
cd openshift-ansible
@@ -23,22 +24,30 @@ tito tag
tito build --rpm
```
+## Build an openshift-ansible container image
+
+**NOTE**: the examples below use "openshift-ansible" as the name of the image to build for simplicity and illustration purposes, and also to prevent potential confusion between custom built images and official releases. See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for details about the released container images for openshift-ansible.
+
+To build a container image of `openshift-ansible` using standalone **Docker**:
+
+ cd openshift-ansible
+ docker build -f images/installer/Dockerfile -t openshift-ansible .
+
+## Build the Atomic System Container
+
+A system container runs under runC instead of Docker and is managed
+by the [atomic](https://github.com/projectatomic/atomic/) tool. Because it
+doesn't require Docker to run, the installer can run on a node of the
+cluster without interfering with the Docker daemon that the installer
+itself configures.
+
+The first step is to build the [container image](#build-an-openshift-ansible-container-image)
+as described before. The container image already contains all the
+required files to run as a system container.
+
+Once the container image is built, we can import it into the OSTree
+storage:
-## Build openshift-ansible-inventory
-- Change into openshift-ansible/inventory
-```
-cd openshift-ansible/inventory
-```
-- Build a test package (no tagging needed)
-```
-tito build --test --rpm
-```
-- Tag a new build (bumps version number and adds log entries)
-```
-tito tag
-```
-- Follow the on screen tito instructions to push the tags
-- Build a new package based on the latest tag information
```
-tito build --rpm
+atomic pull --storage ostree docker:openshift-ansible:latest
```
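+
+The imported image should then be visible in the OSTree-backed image storage.
+Depending on your version of atomic it can be listed with (shown here as an
+illustration only; older versions of atomic use plain `atomic images`):
+
+```
+atomic images list
+```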
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 50bb09470..1c0fa73ad 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,92 +3,103 @@
Thank you for contributing to OpenShift Ansible. This document explains how the
repository is organized, and how to submit contributions.
-## Introduction
+**Table of Contents**
-Before submitting code changes, get familiarized with these documents:
+<!-- TOC depthFrom:2 depthTo:4 withLinks:1 updateOnSave:1 orderedList:0 -->
-- [Core Concepts](https://github.com/openshift/openshift-ansible/blob/master/docs/core_concepts_guide.adoc)
-- [Best Practices Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/best_practices_guide.adoc)
-- [Style Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/style_guide.adoc)
+- [Introduction](#introduction)
+- [Submitting contributions](#submitting-contributions)
+- [Running tests and other verification tasks](#running-tests-and-other-verification-tasks)
+ - [Running only specific tasks](#running-only-specific-tasks)
+- [Appendix](#appendix)
+ - [Tricks](#tricks)
+ - [Activating a virtualenv managed by tox](#activating-a-virtualenv-managed-by-tox)
+ - [Limiting the unit tests that are run](#limiting-the-unit-tests-that-are-run)
+ - [Finding unused Python code](#finding-unused-python-code)
-## Repository structure
+<!-- /TOC -->
-### Ansible
+## Introduction
-```
-.
-├── inventory Contains dynamic inventory scripts, and examples of
-│ Ansible inventories.
-├── library Contains Python modules used by the playbooks.
-├── playbooks Contains Ansible playbooks targeting multiple use cases.
-└── roles Contains Ansible roles, units of shared behavior among
- playbooks.
-```
+Before submitting code changes, get familiarized with these documents:
-#### Ansible plugins
+- [Core Concepts](docs/core_concepts_guide.adoc)
+- [Best Practices Guide](docs/best_practices_guide.adoc)
+- [Style Guide](docs/style_guide.adoc)
+- [Repository Structure](docs/repo_structure.md)
-These are plugins used in playbooks and roles:
+Please consider opening an issue, or commenting on an existing one, if you are
+planning to work on something larger, to make sure your time investment results
+in something that can be merged into the repository.
-```
-.
-├── ansible-profile
-├── callback_plugins
-├── filter_plugins
-└── lookup_plugins
-```
+## Submitting contributions
-### Scripts
+1. [Fork](https://help.github.com/articles/fork-a-repo/) this repository and
+ [create a work branch in your fork](https://help.github.com/articles/github-flow/).
+2. Go through the documents mentioned in the [introduction](#introduction).
+3. Make changes and commit. You may want to review your changes and
+ [run tests](#running-tests-and-other-verification-tasks) before pushing your
+ branch.
+4. [Open a Pull Request](https://help.github.com/articles/creating-a-pull-request/).
+ Give it a meaningful title explaining the changes you are proposing, and
+ then add further details in the description.
+
+One of the repository maintainers will then review the PR and trigger tests, and
+possibly start a discussion that goes on until the PR is ready to be merged.
+This process is further explained in the
+[Pull Request process](docs/pull_requests.md) document.
+
+If you get no timely feedback from a project contributor / maintainer, sorry for
+the delay. You can help us speed up triaging, reviewing and eventually merging
+contributions by requesting a review or tagging in a comment
+[someone who has worked on the files](https://help.github.com/articles/tracing-changes-in-a-file/)
+you're proposing changes to.
-```
-.
-├── bin [DEPRECATED] Contains the `bin/cluster` script, a
-│ wrapper around the Ansible playbooks that ensures proper
-│ configuration, and facilitates installing, updating,
-│ destroying and configuring OpenShift clusters.
-│ Note: this tool is kept in the repository for legacy
-│ reasons and will be removed at some point.
-└── utils Contains the `atomic-openshift-installer` command, an
- interactive CLI utility to install OpenShift across a
- set of hosts.
-```
+---
-### Documentation
+**Note**: during the review process, you may add new commits to address review
+comments or change existing commits. However, before getting your PR merged,
+please [squash commits](https://help.github.com/articles/about-git-rebase/) to a
+minimum set of meaningful commits.
-```
-.
-└── docs Contains documentation for this repository.
-```
+If you've broken your work up into a set of sequential changes and each commit
+passes the tests on its own, then that's fine. If you've got commits fixing typos
+or other problems introduced by previous commits in the same PR, then those
+should be squashed before merging.
-### Tests
+If you are new to Git, these links might help:
-```
-.
-└── test Contains tests.
-```
+- https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History
+- http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html
-## Building RPMs
+---
-See the [RPM build instructions](BUILD.md).
+## Running tests and other verification tasks
-## Running tests
+We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where
+tests and other verification tasks are run. We use
+[`pytest`](https://docs.pytest.org/) as our test runner.
-We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
-tests. Alternatively, tests can be run using
-[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in
-parallel.
+As an alternative to `tox`, one can use
+[`detox`](https://pypi.python.org/pypi/detox/) to run verification tasks in
+parallel. Note that while `detox` may be useful in development to make use of
+multiple cores, it can be buggy at times and produce flakes, thus we do not use
+it in our [CI](docs/continuous_integration.md) jobs.
-Note: while `detox` may be useful in development to make use of multiple cores,
-it can be buggy at times and produce flakes, thus we do not use it in our CI.
+```
+pip install tox
+```
+To run all tests and verification tasks:
```
-pip install tox detox
+tox
```
---
-Note: before running `tox` or `detox`, ensure that the only virtualenvs within
-the repository root are the ones managed by `tox`, those in a `.tox`
+**Note**: before running `tox` or `detox`, ensure that the only virtualenvs
+within the repository root are the ones managed by `tox`, those in a `.tox`
subdirectory.
Use this command to list paths that are likely part of a virtualenv not managed
@@ -105,45 +116,52 @@ potentially fail.
---
-List the test environments available:
+### Running only specific tasks
+
+The [tox configuration](tox.ini) describes environments based on either Python 2
+or Python 3. Each environment is associated with a command that is executed in
+the context of a virtualenv, with a specific version of Python, installed
+dependencies, environment variables and so on. To list the environments
+available:
```
tox -l
```
-Run all of the tests and linters with:
+To run the command of a particular environment, e.g., `flake8` on Python 2.7:
```
-tox
+tox -e py27-flake8
```
-Run all of the tests linters in parallel (may flake):
+To run the command of a particular environment in a clean virtualenv, e.g.,
+`pylint` on Python 3.5:
```
-detox
+tox -re py35-pylint
```
-### Run only unit tests or some specific linter
+The `-r` flag recreates existing environments, useful to force dependencies to
+be reinstalled.
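+
+Several environments can also be selected in a single invocation by passing a
+comma-separated list to `-e` (any of the names printed by `tox -l` can be
+combined this way), for example:
+
+```
+tox -e py27-flake8,py35-pylint
+```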
-Run a particular test environment (`flake8` on Python 2.7 in this case):
+## Appendix
-```
-tox -e py27-flake8
-```
+### Tricks
-Run a particular test environment in a clean virtualenv (`pylint` on Python 3.5
-in this case):
+Here are some useful tips that might improve your workflow while working on this repository.
-```
-tox -re py35-pylint
-```
+#### Git Hooks
-### Tricks
+Git hooks are included in this repository to aid in development. Check
+out the README in the
+[hack/hooks](http://github.com/openshift/openshift-ansible/blob/master/hack/hooks/README.md)
+directory for more information.
#### Activating a virtualenv managed by tox
-If you want to enter a virtualenv created by tox to do additional
-testing/debugging (py27-flake8 env in this case):
+If you want to enter a virtualenv created by tox to do additional debugging, you
+can activate it just like any other virtualenv (py27-flake8 environment in this
+example):
```
source .tox/py27-flake8/bin/activate
@@ -182,29 +200,7 @@ $ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py -k te
Among other things, this can be used for instance to see the coverage levels of
individual modules as we work on improving tests.
-## Submitting contributions
-
-1. Go through the guides from the [introduction](#Introduction).
-2. Fork this repository, and create a work branch in your fork.
-3. Make changes and commit. You may want to review your changes and run tests
- before pushing your branch.
-4. Open a Pull Request.
-
-One of the repository maintainers will then review the PR and submit it for
-testing.
-
-The `default` test job is publicly accessible at
-https://ci.openshift.redhat.com/jenkins/job/openshift-ansible/. The other jobs
-are run on a different Jenkins host that is not publicly accessible, however the
-test results are posted to S3 buckets when complete.
-
-The test output of each job is also posted to the Pull Request as comments.
-
----
-
-## Appendix
-
-### Finding unused Python code
+#### Finding unused Python code
If you are contributing with Python code, you can use the tool
[`vulture`](https://pypi.python.org/pypi/vulture) to verify that you are not
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index 668d14fc0..009a1d95c 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -1,23 +1,17 @@
-#Deployment Types
+# Deployment Types
-This module supports OpenShift Origin, OpenShift Enterprise, and Atomic
-Enterprise Platform. Each deployment type sets various defaults used throughout
-your environment.
+This repository supports OpenShift Origin and OpenShift Container Platform.
-The table below outlines the defaults per `deployment_type`.
-
-| deployment_type | origin | enterprise (< 3.1) | atomic-enterprise | openshift-enterprise (>= 3.1) |
-|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|----------------------------------|
-| **openshift.common.service_type** (also used for package names) | origin | openshift | atomic-openshift | |
-| **openshift.common.config_base** | /etc/origin | /etc/openshift | /etc/origin | /etc/origin |
-| **openshift.common.data_dir** | /var/lib/origin | /var/lib/openshift | /var/lib/origin | /var/lib/origin |
-| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | aos3/aos-${component}:${version} |
-| **Image Streams** | centos | rhel + xpaas | N/A | rhel |
-
-
-**NOTE** `enterprise` deployment type is used for OpenShift Enterprise version
-3.0.x OpenShift Enterprise deployments utilizing version 3.1 and later will
-make use of the new `openshift-enterprise` deployment type. Additional work to
-migrate between the two will be forthcoming.
+Various defaults used throughout the playbooks and roles in this repository are
+set based on the deployment type configuration (usually defined in an Ansible
+hosts file).
+The table below outlines the defaults per `openshift_deployment_type`:
+| openshift_deployment_type | origin | openshift-enterprise |
+|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|
+| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift |
+| **openshift.common.config_base** | /etc/origin | /etc/origin |
+| **openshift.common.data_dir** | /var/lib/origin | /var/lib/origin |
+| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
+| **Image Streams** | centos | rhel |
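+
+For example, the deployment type is normally set in an Ansible hosts file, but
+it can equally be passed as an extra variable on the command line (the
+inventory path below is illustrative):
+
+```
+ansible-playbook -i /etc/ansible/hosts playbooks/byo/config.yml \
+    -e openshift_deployment_type=origin
+```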
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index eecf3630b..000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,42 +0,0 @@
-# Using playbook2image as a base
-# See https://github.com/aweiteka/playbook2image for details on the image
-# including documentation for the settings/env vars referenced below
-FROM docker.io/aweiteka/playbook2image:latest
-
-MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-
-LABEL name="openshift-ansible" \
- summary="OpenShift's installation and configuration tool" \
- description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
- url="https://github.com/openshift/openshift-ansible" \
- io.k8s.display-name="openshift-ansible" \
- io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
- io.openshift.expose-services="" \
- io.openshift.tags="openshift,install,upgrade,ansible"
-
-USER root
-
-RUN INSTALL_PKGS="skopeo" && \
- yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
- rpm -V $INSTALL_PKGS && \
- yum clean all
-
-USER ${USER_UID}
-
-# The playbook to be run is specified via the PLAYBOOK_FILE env var.
-# This sets a default of openshift_facts.yml as it's an informative playbook
-# that can help test that everything is set properly (inventory, sshkeys)
-ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
- OPTS="-v" \
- INSTALL_OC=true
-
-# playbook2image's assemble script expects the source to be available in
-# /tmp/src (as per the source-to-image specs) so we import it there
-ADD . /tmp/src
-
-# Running the 'assemble' script provided by playbook2image will install
-# dependencies specified in requirements.txt and install the 'oc' client
-# as per the INSTALL_OC environment setting above
-RUN /usr/libexec/s2i/assemble
-
-CMD [ "/usr/libexec/s2i/run" ]
diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7
deleted file mode 100644
index 0d5a6038a..000000000
--- a/Dockerfile.rhel7
+++ /dev/null
@@ -1,41 +0,0 @@
-FROM openshift3/playbook2image
-
-MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-
-LABEL name="openshift3/openshift-ansible" \
- summary="OpenShift's installation and configuration tool" \
- description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
- url="https://github.com/openshift/openshift-ansible" \
- io.k8s.display-name="openshift-ansible" \
- io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
- io.openshift.expose-services="" \
- io.openshift.tags="openshift,install,upgrade,ansible" \
- com.redhat.component="aos3-installation-docker" \
- version="v3.4.1" \
- release="1" \
- architecture="x86_64"
-
-# Playbooks, roles and their dependencies are installed from packages.
-# Unlike in Dockerfile, we don't invoke the 'assemble' script here
-# because all content and dependencies (like 'oc') is already
-# installed via yum.
-USER root
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients" && \
- yum repolist > /dev/null && \
- yum-config-manager --enable rhel-7-server-ose-3.4-rpms && \
- yum install -y $INSTALL_PKGS && \
- yum clean all
-
-USER ${USER_UID}
-
-# The playbook to be run is specified via the PLAYBOOK_FILE env var.
-# This sets a default of openshift_facts.yml as it's an informative playbook
-# that can help test that everything is set properly (inventory, sshkeys).
-# As the playbooks are installed via packages instead of being copied to
-# $APP_HOME by the 'assemble' script, we set the WORK_DIR env var to the
-# location of openshift-ansible.
-ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
- WORK_DIR=/usr/share/ansible/openshift-ansible \
- OPTS="-v"
-
-CMD [ "/usr/libexec/s2i/run" ]
diff --git a/README.md b/README.md
index 3ec6555e8..71912fb98 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,8 @@ Follow this release pattern and you can't go wrong:
| ------------- | ----------------- |
| 1.3 | 3.3 |
| 1.4 | 3.4 |
-| 1.*X* | 3.*X* |
+| 1.5 | 3.5 |
+| 3.*X* | 3.*X* |
If you're running from the openshift-ansible **master branch** we can
only guarantee compatibility with the newest origin releases **in
@@ -54,7 +55,7 @@ you are not running a stable release.
***
Requirements:
- - Ansible >= 2.2.0
+ - Ansible >= 2.2.2.0
- Jinja >= 2.7
- pyOpenSSL
- python-lxml
@@ -83,7 +84,10 @@ See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for information on ho
See the [hooks documentation](HOOKS.md).
-
## Contributing
See the [contribution guide](CONTRIBUTING.md).
+
+## Building openshift-ansible RPMs and container images
+
+See the [build instructions](BUILD.md).
diff --git a/README_ANSIBLE_CONTAINER.md b/README_ANSIBLE_CONTAINER.md
deleted file mode 100644
index 30c5f8503..000000000
--- a/README_ANSIBLE_CONTAINER.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Running ansible in a docker container
-* Building ansible container:
-
- ```sh
- git clone https://github.com/openshift/openshift-ansible.git
- cd openshift-ansible
- docker build --rm -t ansible .
- ```
-* Create /etc/ansible directory on the host machine and copy inventory file (hosts) into it.
-* Copy ssh public key of the host machine to master and nodes machines in the cluster.
-* Running the ansible container:
-
- ```sh
- docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible ansible
- ```
diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md
index 5e013e809..c697783e3 100644
--- a/README_CONTAINERIZED_INSTALLATION.md
+++ b/README_CONTAINERIZED_INSTALLATION.md
@@ -38,7 +38,7 @@ and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or
### Requisite Images
-Based on your deployment_type the installer will make use of the following
+Based on your `openshift_deployment_type` the installer will make use of the following
images. Because you may make use of a private repository we've moved the
configuration of docker additional, insecure, and blocked registries to the
beginning of the installation process ensuring that these settings are applied
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
index 35e057af3..a2151352d 100644
--- a/README_CONTAINER_IMAGE.md
+++ b/README_CONTAINER_IMAGE.md
@@ -1,41 +1,74 @@
# Containerized openshift-ansible to run playbooks
-The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks.
+The [Dockerfile](images/installer/Dockerfile) in this repository can be used to build a containerized `openshift-ansible`. The resulting image can run any of the provided playbooks. See [BUILD.md](BUILD.md) for image build instructions.
-**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes (i.e. run one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation.
-
-## Build
-
-To build a container image of `openshift-ansible`:
+The image is designed to **run as a non-root user**. The container's UID is mapped to the username `default` at runtime. Therefore, the container's environment reflects that user's settings, and the configuration should match that. For example `$HOME` is `/opt/app-root/src`, so ssh keys are expected to be under `/opt/app-root/src/.ssh`. If you ran a container as `root` you would have to adjust the container's configuration accordingly, e.g. by placing ssh keys under `/root/.ssh` instead. Nevertheless, the expectation is that containers will be run as non-root; for example, this container image can be run inside OpenShift under the default `restricted` [security context constraint](https://docs.openshift.org/latest/architecture/additional_concepts/authorization.html#security-context-constraints).
-1. Using standalone **Docker**:
+**Note**: at this time there are known issues that prevent running this image for installation/upgrade purposes (i.e. running one of the config/upgrade playbooks) from within a host that is also an installation target: if the playbook you want to run attempts to manage and restart the docker daemon (as install/upgrade playbooks do), it would kill the container itself during its operation.
- cd openshift-ansible
- docker build -t openshift/openshift-ansible .
+## A note about the name of the image
-1. Using an **OpenShift** build:
+The released container images for openshift-ansible follow the naming scheme determined by OpenShift's `imageConfig.format` configuration option. This means that the released image name is `openshift/origin-ansible` instead of `openshift/openshift-ansible`.
- oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
- oc describe imagestream openshift-ansible
+This provides consistency with other images used by the platform and it's also a requirement for some use cases like using the image from [`oc cluster up`](https://github.com/openshift/origin/blob/master/docs/cluster_up_down.md).
## Usage
-The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation.
+At the very least, when running a container you must specify:
+
+1. An **inventory**. This can be a location inside the container (possibly mounted as a volume) with a path referenced via the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it, or `DYNAMIC_SCRIPT_URL` to download a script that provides a dynamic inventory.
-At the very least, when running a container using an image built this way you must specify:
+1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh` under normal usage (i.e. when running the container as non-root).
-1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it.
-1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh`
-1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collecting and show facts about your OpenShift environment.
+1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collect and show facts about your OpenShift environment.
-Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image:
+Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry):
docker run -u `id -u` \
-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
-v /etc/ansible/hosts:/tmp/inventory \
-e INVENTORY_FILE=/tmp/inventory \
- -e OPTS="-v" \
- -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
- openshift/openshift-ansible
+ -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml \
+ -e OPTS="-v" -t \
+ openshift/origin-ansible
+
+You might want to adjust some of the options in the example to match your environment and/or preferences. For example: you might want to create a separate directory on the host where you'll copy the ssh key and inventory files prior to invocation to avoid unwanted SELinux re-labeling of the original files or paths (see below).
+
+Here is a detailed explanation of the options used in the command above:
+
+* ``-u `id -u` `` makes the container run with the same UID as the current user, which is required for permissions so that the ssh key can be read inside the container (ssh private keys are expected to be readable only by their owner). Usually you would invoke `docker run` as a non-root user that has privileges to run containers and leave that option as is.
+
+* `-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z` mounts your ssh key (`$HOME/.ssh/id_rsa`) under the `default` user's `$HOME/.ssh` in the container (as explained above, `/opt/app-root/src` is the `$HOME` of the `default` user in the container). If you mount the ssh key into a non-standard location you can add an environment variable with `-e ANSIBLE_PRIVATE_KEY_FILE=/the/mount/point` or set `ansible_ssh_private_key_file=/the/mount/point` as a variable in the inventory to point Ansible at it.
+
+ Note that the ssh key is mounted with the `:Z` flag: this is also required so that the container can read the ssh key from its restricted SELinux context; this means that *your original ssh key file will be re-labeled* to something like `system_u:object_r:container_file_t:s0:c113,c247`. For more details about `:Z` please check the `docker-run(1)` man page. Please keep this in mind when providing these volume mount specifications because this could have unexpected consequences: for example, if you mount (and therefore re-label) your whole `$HOME/.ssh` directory you will block `sshd` from accessing your keys. This is a reason why you might want to work on a separate copy of the ssh key, so that the original file's labels remain untouched.
+
+* `-v /etc/ansible/hosts:/tmp/inventory` and `-e INVENTORY_FILE=/tmp/inventory` mount the Ansible inventory file into the container as `/tmp/inventory` and set the corresponding environment variable to point at it respectively. The example uses `/etc/ansible/hosts` as the inventory file as this is a default location, but your inventory is likely to be elsewhere so please adjust as needed. Note that depending on the file you point to you might have to handle SELinux labels in a similar way as with the ssh keys, e.g. by adding a `:z` flag to the volume mount, so again you might prefer to copy the inventory to a dedicated location first.
+
+* `-e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible.
+
+* `-e OPTS="-v"` and `-t` make the output look nicer: the `default.yaml` playbook does not generate results and runs quietly unless we add the `-v` option to the `ansible-playbook` invocation, and a TTY is allocated via `-t` so that Ansible adds color to the output.
+
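+As a further illustration, a full cluster installation run from a dedicated directory holding copies of the ssh key and inventory (all paths below are examples only; adjust them to your environment) might look like:
+
+    docker run -u `id -u` \
+        -v $HOME/openshift-install/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+        -v $HOME/openshift-install/hosts:/tmp/inventory:z \
+        -e INVENTORY_FILE=/tmp/inventory \
+        -e PLAYBOOK_FILE=playbooks/byo/config.yml \
+        -e OPTS="-v" -t \
+        openshift/origin-ansible
+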
+Further usage examples are available in the [examples directory](examples/) with samples of how to use the image from within OpenShift.
+
+## Running openshift-ansible as a System Container
+
+Building the System Container: See the [BUILD.md](BUILD.md).
+
+Copy the host machine's ssh public key to the master and node machines in the cluster.
+
+If the inventory file references additional files, they can be placed under the path `/var/lib/openshift-installer` in the container, as that path is bind mounted from the host (controllable with `VAR_LIB_OPENSHIFT_INSTALLER`).
+
+Run the ansible system container:
+
+```sh
+atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/origin-ansible
+systemctl start origin-ansible
+```
+
+The `INVENTORY_FILE` variable tells the installer which inventory file on the host will be bind mounted inside the container. In the example above, a file called `inventory.origin` in the current directory is used as the inventory file for the installer.
+
+Finally, to clean up the container:
-The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use an image built from it like this one.
+```
+atomic uninstall origin-ansible
+```
diff --git a/README_libvirt.md b/README_libvirt.md
index c523d83fb..1661681a0 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -15,7 +15,7 @@ Install dependencies
3. Install [ebtables](http://ebtables.netfilter.org/)
4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
5. Install [libvirt-python and libvirt](http://libvirt.org/)
-6. Install [genisoimage](http://cdrkit.org/)
+6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
7. Enable and start the libvirt daemon, e.g:
- `systemctl enable libvirtd`
- `systemctl start libvirtd`
@@ -23,6 +23,7 @@ Install dependencies
9. Check that your `$HOME` is accessible to the qemu user²
10. Configure dns resolution on the host³
11. Install libselinux-python
+12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴
#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
@@ -103,6 +104,11 @@ sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
server=/example.com/192.168.55.1
```
+#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
+
+This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.
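+
+If you do not have such a keypair yet, one way to generate it (shown as an
+illustration; any standard keypair at those fixed paths will work) is:
+
+```
+ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ''
+```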
+
+
Test The Setup
--------------
diff --git a/ansible.cfg b/ansible.cfg
index 034733684..589a58e9d 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -14,7 +14,15 @@ callback_plugins = callback_plugins/
forks = 20
host_key_checking = False
retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
nocows = True
+remote_user = root
+roles_path = roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
# Uncomment to use the provided BYO inventory
#hostfile = inventory/byo/hosts
@@ -28,3 +36,8 @@ nocows = True
# Additional ssh options for OpenShift Ansible
[ssh_connection]
pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
diff --git a/bin/cluster b/bin/cluster
index b9b2ab15f..f77eb36ad 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -1,5 +1,4 @@
#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
import ConfigParser
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index f31445381..20bdd9056 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -7,7 +7,6 @@ The plugin is named with leading `aa_` to ensure this plugin is loaded
first (alphanumerically) by Ansible.
"""
import sys
-from subprocess import check_output
from ansible import __version__
if __version__ < '2.0':
@@ -30,13 +29,8 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.2.0.0'
-DESCRIPTION = "Supported versions: %s or newer (except 2.2.1.0)" % REQUIRED_VERSION
-FAIL_ON_2_2_1_0 = "There are known issues with Ansible version 2.2.1.0 which " \
- "are impacting OpenShift-Ansible. Please use Ansible " \
- "version 2.2.0.0 or a version greater than 2.2.1.0. " \
- "See this issue for more details: " \
- "https://github.com/openshift/openshift-ansible/issues/3111"
+REQUIRED_VERSION = '2.2.2.0'
+DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
def version_requirement(version):
@@ -64,13 +58,3 @@ class CallbackModule(CallbackBase):
'FATAL: Current Ansible version (%s) is not supported. %s'
% (__version__, DESCRIPTION), color='red')
sys.exit(1)
-
- if __version__ == '2.2.1.0':
- rpm_ver = str(check_output(["rpm", "-qa", "ansible"]))
- patched_ansible = '2.2.1.0-2'
-
- if patched_ansible not in rpm_ver:
- display(
- 'FATAL: Current Ansible version (%s) is not supported. %s'
- % (__version__, FAIL_ON_2_2_1_0), color='red')
- sys.exit(1)
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index 7f3d85d40..e66c5addb 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -11,44 +11,9 @@ All new pull requests created against this repository MUST comply with this guid
This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
-== Pull Requests
-
-
-
-[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]]
-[cols="2v,v"]
-|===
-| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>>
-| All pull requests MUST pass the build bot *before* they are merged.
-|===
-
-The purpose of this rule is to avoid cases where the build bot will fail pull requests for code modified in a previous pull request.
-
-The tooling is flexible enough that exceptions can be made so that the tool the build bot is running will ignore certain areas or certain checks, but the build bot itself must pass for the pull request to be merged.
-
-
== Python
-=== Python Source Files
-
-'''
-[[Python-source-files-MUST-contain-the-following-vim-mode-line]]
-[cols="2v,v"]
-|===
-| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>>
-| Python source files MUST contain the following vim mode line.
-|===
-
-[source]
-----
-# vim: expandtab:tabstop=4:shiftwidth=4
-----
-
-Since most developers contributing to this repository use vim, this rule helps to promote consistency.
-
-If mode lines for other editors are needed, please open a GitHub issue.
-
=== Method Signatures
'''
@@ -509,12 +474,12 @@ The Ansible `package` module calls the associated package manager for the underl
# tasks.yml
- name: Install etcd (for etcdctl)
yum: name=etcd state=latest
- when: "ansible_pkg_mgr == yum"
+ when: ansible_pkg_mgr == yum
register: install_result
- name: Install etcd (for etcdctl)
dnf: name=etcd state=latest
- when: "ansible_pkg_mgr == dnf"
+ when: ansible_pkg_mgr == dnf
register: install_result
----
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
new file mode 100644
index 000000000..b6c1d8c5b
--- /dev/null
+++ b/docs/proposals/role_decomposition.md
@@ -0,0 +1,353 @@
+# Scaffolding for decomposing large roles
+
+## Why?
+
+Currently we have roles that are very large and encompass a lot of different
+components. This requires a lot of logic within the role, can create complex
+conditionals, and increases the learning curve for the role.
+
+## How?
+
+Create a guide on how to approach breaking up a large role into smaller,
+component-based roles, and describe how to develop new roles in a way that
+avoids creating large roles in the first place.
+
+## Proposal
+
+Create a new guide, or extend the current contributing guide, with a process for
+identifying large roles that can be split up and for composing smaller roles
+going forward.
+
+### Large roles
+
+A role should be considered for decomposition if it:
+
+1) Configures/installs more than one product.
+1) Can configure multiple variations of the same product that can live
+side by side.
+1) Has different entry points for upgrading and installing a product
+
+Large roles<sup>1</sup> should be responsible for:
+> 1 or composing playbooks
+
+1) Composing smaller roles to provide a full solution such as an OpenShift Master
+1) Ensuring that smaller roles are called in the correct order if necessary
+1) Calling smaller roles with their required variables
+1) Performing prerequisite tasks that small roles may depend on being in place
+(openshift_logging certificate generation for example)
+
+### Small roles
+
+A small role should be able to:
+
+1) Be deployed independently of other products (this is different from requiring
+installation after other base components such as OCP)
+1) Be self contained and able to determine facts that it requires to complete
+1) Fail fast when facts it requires are not available or are invalid
+1) "Make it so" based on provided variables and anything that may be required
+as part of doing such (this should include data migrations)
+1) Have a minimal set of dependencies in meta/main.yml, just enough to do its job
+
+### Example using decomposition of openshift_logging
+
+The `openshift_logging` role was created as a port from the deployer image for
+the `3.5` deliverable. It was a large role that created the service accounts,
+configmaps, secrets, routes, and deployment configs/daemonset required for each
+of its different components (Fluentd, Kibana, Curator, Elasticsearch).
+
+It was possible to configure any of the components independently of one another,
+up to a point. However, it was an all-or-nothing installation, and customers
+needed to be able to do things like deploy just Fluentd.
+
+Supporting multiple versions of configuration files would also become
+increasingly messy with a large role, especially if the components changed
+at different intervals.
+
+#### Folding of responsibility
+
+There was duplicated work within the installation of three of the four logging
+components, where it was possible to deploy both an 'operations' and a
+'non-operations' cluster side by side. The first step was to collapse that
+duplicated work into a single path and allow a variable to be provided to
+select which of the two variants should be created.
+
+#### Consolidation of responsibility
+
+The OCP objects required for each component were all being created in the same
+task file: all Service Accounts were created at the same time, as were all
+secrets, configmaps, etc. The only objects that were not generated at the same
+time were the deployment configs and the daemonset. The second step was to make
+the small roles self-contained so that each generates its own required objects.
+
+#### Consideration for prerequisites
+
+Currently the Aggregated Logging stack generates its own certificates, as it has
+some requirements that prevent it from utilizing the OCP cert generation service.
+To make sure that all components are able to trust one another as they did
+previously, until the cert generation service can be used, certificate generation
+is handled within the top-level `openshift_logging` role, which provides the
+location of the generated certificates to the individual roles.
+
+#### Snippets
+
+[openshift_logging/tasks/install_logging.yaml](https://github.com/ewolinetz/openshift-ansible/blob/logging_component_subroles/roles/openshift_logging/tasks/install_logging.yaml)
+```yaml
+- name: Gather OpenShift Logging Facts
+ openshift_logging_facts:
+ oc_bin: "{{openshift.common.client_binary}}"
+ openshift_logging_namespace: "{{openshift_logging_namespace}}"
+
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+
+- name: Create logging cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/logging"
+ state: directory
+ mode: 0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_certs.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+## Elasticsearch
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_es_ops_deployment: true
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Kibana
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_ops_deployment: true
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
+ openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
+ openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
+ openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
+ openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
+ openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Curator
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_ops_deployment: true
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Fluentd
+- include_role:
+ name: openshift_logging_fluentd
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include: update_master_config.yaml
+```
+
+[openshift_logging_elasticsearch/meta/main.yaml](https://github.com/ewolinetz/openshift-ansible/blob/logging_component_subroles/roles/openshift_logging_elasticsearch/meta/main.yaml)
+```yaml
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Elasticsearch Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+```
+
+[openshift_logging/meta/main.yaml](https://github.com/ewolinetz/openshift-ansible/blob/logging_component_subroles/roles/openshift_logging/meta/main.yaml)
+```yaml
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
+```
+
+[openshift_logging/tasks/install_support.yaml - old](https://github.com/openshift/openshift-ansible/blob/master/roles/openshift_logging/tasks/install_support.yaml)
+```yaml
+---
+# This is the base configuration for installing the other components
+- name: Check for logging project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
+ register: logging_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in logging_project_result.stderr
+
+- name: Create logging cert directory
+ file: path={{openshift.common.config_base}}/logging state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_certs.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include: generate_configmaps.yaml
+
+- include: generate_services.yaml
+
+- name: Generate kibana-proxy oauth client
+ template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
+ vars:
+ secret: "{{oauth_secret}}"
+ when: oauth_secret is defined
+ check_mode: no
+ changed_when: no
+
+- include: generate_clusterroles.yaml
+
+- include: generate_rolebindings.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
+
+- include: generate_routes.yaml
+```
+
+# Limitations
+
+There will always be exceptions to some of these rules; however, the majority of
+roles should be able to fall within these guidelines.
+
+# Additional considerations
+
+## Playbooks including playbooks
+In some circumstances it does not make sense to have a composing role; instead,
+a playbook is best for orchestrating the role flow. Decisions made regarding
+playbooks that include other playbooks will need to be taken into consideration
+as part of defining this process.
+Ref: (link to rteague's presentation?)
+
+## Role dependencies
+We want to make sure that our roles do not carry any extra or unnecessary dependencies
+in meta/main.yml. No dependency should be added without:
+
+1. Proposing the inclusion in a team meeting or as part of the PR review and getting agreement
+1. Documenting in meta/main.yml why it is there and when it was agreed to (date)
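+
+Putting these together, a documented dependency entry in meta/main.yml might
+look like the following sketch (the reason and date shown are illustrative):
+
+```yaml
+---
+dependencies:
+# Needed by this role's tasks, which use modules from lib_openshift.
+# Agreed during PR review, 2017-06-13 (illustrative reason and date).
+- role: lib_openshift
+```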
+
+## Avoiding overly verbose roles
+When splitting our roles up into smaller components we want to ensure we
+avoid creating roles that are, for lack of a better term, overly verbose. What
+do we mean by that? Taking `openshift_master` as an example, if we were to
+split it up, we would have a component for `etcd`, `docker`, and possibly for
+its rpms/configs. We would want to avoid creating a role that only creates
+certificates, as those make more sense contained with the rpms and configs.
+Likewise, when it comes to being able to restart the master, we wouldn't have a
+role whose sole purpose is that.
+
+The same would apply for the `etcd` and `docker` roles. Anything that is required
+as part of installing `etcd` such as generating certificates, installing rpms,
+and upgrading data between versions should all be contained within the single
+`etcd` role.
+
+## Enforcing standards
+Certain naming standards, such as variable name prefixes, could be verified as
+part of a Travis test. If we also wanted to enforce that a role has either tasks
+or includes (for example), then we could create tests for that as well, as
+sketched below.
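+
+A hypothetical Travis stanza for such checks might look like the following
+(neither helper script exists yet; their names are placeholders):
+
+```yaml
+# .travis.yml fragment (sketch only)
+script:
+- ./hack/verify-role-variable-prefixes.sh  # fail on role variables missing the role-name prefix
+- ./hack/verify-role-structure.sh          # fail on roles that define neither tasks nor includes
+```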
+
+## CI tests for individual roles
+If we are able to split up roles correctly, it should be possible to test role
+installations/upgrades like unit tests (assuming the roles can be installed
+independently of other components).
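+
+Such a check could be as small as a playbook that exercises one role in
+isolation; a sketch (the target role and host group are illustrative):
+
+```yaml
+---
+# Per-role installation check (sketch only).
+- name: Exercise a single role in isolation
+  hosts: masters
+  roles:
+  - role: openshift_logging_curator
+```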
diff --git a/docs/pull_requests.md b/docs/pull_requests.md
new file mode 100644
index 000000000..45ae01a9d
--- /dev/null
+++ b/docs/pull_requests.md
@@ -0,0 +1,95 @@
+# Pull Request process
+
+Pull Requests in the `openshift-ansible` project follow a
+[Continuous](https://en.wikipedia.org/wiki/Continuous_integration)
+[Integration](https://martinfowler.com/articles/continuousIntegration.html)
+process that is similar to the process observed in other repositories such as
+[`origin`](https://github.com/openshift/origin).
+
+Whenever a
+[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some
+automated test jobs must be successfully run before the PR can be merged.
+
+Some of these jobs are automatically triggered, e.g., Travis, PAPR, and
+Coveralls. Other jobs need to be manually triggered by a member of the
+[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors).
+
+## Triggering tests
+
+We currently have two different Jenkins infrastructures and, while that remains
+the case, two different commands trigger separate sets of test jobs. We are
+working on simplifying the workflow towards a single infrastructure in the future.
+
+- **Test jobs on the older infrastructure**
+
+ Members of the [OpenShift organization](https://github.com/orgs/openshift/people)
+ can trigger the set of test jobs in the older infrastructure by writing a
+ comment with the exact text `aos-ci-test` and nothing else.
+
+ The Jenkins host is not publicly accessible. Test results are posted to S3
+ buckets when complete, and links are available both at the bottom of the Pull
+ Request page and as comments posted by
+ [@openshift-bot](https://github.com/openshift-bot).
+
+- **Test jobs on the newer infrastructure**
+
+ Members of the
+ [Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
+ can trigger the set of test jobs in the newer infrastructure by writing a
+ comment containing `[test]` anywhere in the comment body.
+
+ The [Jenkins host](https://ci.openshift.redhat.com/jenkins/job/test_pull_request_openshift_ansible/)
+ is publicly accessible. As with the older infrastructure, the result of each
+ job is posted to the Pull Request as comments and summarized at the
+ bottom of the Pull Request page.
+
+### Fedora tests
+
+There is a set of tests that runs on Fedora infrastructure; they are started
+automatically with every pull request.
+
+They are implemented using the [`PAPR` framework](https://github.com/projectatomic/papr).
+
+To re-run tests, write a comment containing only `bot, retest this please`.
+
+## Triggering merge
+
+After a PR is properly reviewed and a set of
+[required jobs](https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml)
+reported successfully, it can be tagged for merge by a member of the
+[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
+by writing a comment containing `[merge]` anywhere in the comment body.
+
+Tagging a Pull Request for merge puts it in an automated merge queue. The
+[@openshift-bot](https://github.com/openshift-bot) monitors the queue and merges
+PRs that pass all of the required tests.
+
+### Manual merges
+
+The normal process described above should be followed: `aos-ci-test` and
+`[test]` / `[merge]`.
+
+In exceptional cases, such as when known problems with the merge queue prevent
+PRs from being merged, a PR may be manually merged if _all_ of these conditions
+are true:
+
+- [ ] Travis job must have passed (as enforced by GitHub)
+- [ ] Must have passed `aos-ci-test` (as enforced by GitHub)
+- [ ] Must have a positive review (as enforced by GitHub)
+- [ ] Must have failed the `[merge]` queue with a reported flake at least twice
+- [ ] Must have [issues labeled kind/test-flake](https://github.com/openshift/origin/issues?q=is%3Aopen+is%3Aissue+label%3Akind%2Ftest-flake) in [Origin](https://github.com/openshift/origin) linked in comments for the failures
+- [ ] Content must not have changed since all of the above conditions have been met (no rebases, no new commits)
+
+This exception is temporary and should be completely removed in the future once
+the merge queue has become more stable.
+
+Only members of the
+[Team OpenShift Ansible Committers](https://github.com/orgs/openshift/teams/team-openshift-ansible-committers)
+can perform manual merges.
+
+## Useful links
+
+- Repository containing Jenkins job definitions: https://github.com/openshift/aos-cd-jobs
+- List of required successful jobs before merge: https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml
+- Source code of the bot responsible for testing and merging PRs: https://github.com/openshift/test-pull-requests/
+- Trend of the time taken by merge jobs: https://ci.openshift.redhat.com/jenkins/job/merge_pull_request_openshift_ansible/buildTimeTrend
diff --git a/docs/repo_structure.md b/docs/repo_structure.md
new file mode 100644
index 000000000..f598f22c3
--- /dev/null
+++ b/docs/repo_structure.md
@@ -0,0 +1,67 @@
+# Repository structure
+
+### Ansible
+
+```
+.
+├── inventory Contains dynamic inventory scripts, and examples of
+│ Ansible inventories.
+├── library Contains Python modules used by the playbooks.
+├── playbooks Contains Ansible playbooks targeting multiple use cases.
+└── roles Contains Ansible roles, units of shared behavior among
+ playbooks.
+```
+
+#### Ansible plugins
+
+These are plugins used in playbooks and roles:
+
+```
+.
+├── ansible-profile
+├── callback_plugins
+├── filter_plugins
+└── lookup_plugins
+```
+
+### Scripts
+
+```
+.
+├── bin [DEPRECATED] Contains the `bin/cluster` script, a
+│ wrapper around the Ansible playbooks that ensures proper
+│ configuration, and facilitates installing, updating,
+│ destroying and configuring OpenShift clusters.
+│ Note: this tool is kept in the repository for legacy
+│ reasons and will be removed at some point.
+└── utils Contains the `atomic-openshift-installer` command, an
+ interactive CLI utility to install OpenShift across a
+ set of hosts.
+```
+
+### Documentation
+
+```
+.
+└── docs Contains documentation for this repository.
+```
+
+### Tests
+
+```
+.
+└── test Contains tests.
+```
+
+### CI
+
+These files are used by [PAPR](https://github.com/projectatomic/papr),
+It is very similar in workflow to Travis, with the test
+environment and test scripts defined in a YAML file.
+
+```
+.
+├── .papr.yml
+├── .papr.sh
+└── .papr.inventory
+```
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 000000000..d54752fb9
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,93 @@
+# openshift-ansible usage examples
+
+The primary use of `openshift-ansible` is to install, configure and upgrade OpenShift clusters.
+
+This is typically done by direct invocation of Ansible tools like `ansible-playbook`. This use case is covered in detail in the [OpenShift advanced installation documentation](https://docs.openshift.org/latest/install_config/install/advanced_install.html).
+
+For OpenShift Container Platform there's also an installation utility that wraps `openshift-ansible`. This use case is covered in the [Quick Installation](https://docs.openshift.com/container-platform/latest/install_config/install/quick_install.html) section of the documentation.
+
+The usage examples below cover use cases other than install/configure/upgrade.
+
+## Container image
+
+The examples below run [openshift-ansible in a container](../README_CONTAINER_IMAGE.md) to perform certificate expiration checks on an OpenShift cluster from pods running on the cluster itself.
+
+You can find more details about the certificate expiration check roles and example playbooks in [the openshift_certificate_expiry role's README](../roles/openshift_certificate_expiry/README.md).
+
+### Job to upload certificate expiration reports
+
+The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.openshift.org/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters.
+
+This example uses the [`easy-mode-upload.yaml`](../playbooks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`).
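+
+For instance, both variables can be overridden in the `env` section of the Job
+definition; a sketch (the values shown are illustrative):
+
+    env:
+    - name: CERT_EXPIRY_WARN_DAYS
+      value: "60"   # must be a string
+    - name: COPY_TO_PATH
+      value: /etc/origin/certificate_expiration_report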
+
+The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.openshift.org/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be available as *ssh-privatekey* from a [Secret](https://docs.openshift.org/latest/dev_guide/secrets.html) named *sshkey*, so these are created first:
+
+ oc new-project certcheck
+ oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
+ oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+
+Note that `inventory`, `hosts`, `sshkey` and `ssh-privatekey` are referenced by name from the provided example Job definition. If you use different names for the objects/attributes you will have to adjust the Job accordingly.
+
+To create the Job:
+
+ oc create -f examples/certificate-check-upload.yaml
+
+### Scheduled job for certificate expiration report upload
+
+**Note**: This example uses the [ScheduledJob](https://docs.openshift.com/container-platform/3.4/dev_guide/scheduled_jobs.html) object, which has been renamed to [CronJob](https://docs.openshift.org/latest/dev_guide/cron_jobs.html) upstream and is still a Technology Preview feature, subject to further change.
+
+The example `ScheduledJob` in [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) does the same as the `Job` example above, but it is scheduled to run automatically on the first day of every month (see the `spec.schedule` value in the example).
+
+The job definition is the same, and it expects the same configuration: we provide the inventory and ssh key via a ConfigMap and a Secret, respectively:
+
+ oc new-project certcheck
+ oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
+ oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+
+And then we create the ScheduledJob:
+
+ oc create -f examples/scheduled-certcheck-upload.yaml
+
+### Job and ScheduledJob to check certificates using volumes
+
+There are two additional examples:
+
+ - A `Job` [certificate-check-volume.yaml](certificate-check-volume.yaml)
+ - A `ScheduledJob` [scheduled-certcheck-volume.yaml](scheduled-certcheck-volume.yaml)
+
+These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container.
+
+These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the [`html_and_json_timestamp.yaml`](../playbooks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it.
+
+You can later access the reports from another pod that mounts the same volume, or externally via direct access to the backend storage behind the matching `PersistentVolume`.
+
+To run these examples we prepare the inventory and ssh keys as in the other examples:
+
+ oc new-project certcheck
+ oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
+ oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+
+Additionally we allocate a `PersistentVolumeClaim` to store the reports:
+
+ oc create -f - <<PVC
+ ---
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: certcheck-reports
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ PVC
+
+With that we can run the `Job` once:
+
+ oc create -f examples/certificate-check-volume.yaml
+
+or schedule it to run periodically as a `ScheduledJob`:
+
+ oc create -f examples/scheduled-certcheck-volume.yaml
+
diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml
new file mode 100644
index 000000000..1794cb096
--- /dev/null
+++ b/examples/certificate-check-upload.yaml
@@ -0,0 +1,53 @@
+# An example Job to run a certificate check of OpenShift's internal
+# certificate status from within OpenShift.
+#
+# The generated reports are uploaded to a location in the master
+# hosts, using the playbook 'easy-mode-upload.yaml'.
+#
+# This example uses the openshift/origin-ansible container image.
+# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
+#
+# The following objects are expected to be configured before the creation
+# of this Job:
+# - A ConfigMap named 'inventory' with a key named 'hosts' that
+# contains the Ansible inventory file
+# - A Secret named 'sshkey' with a key named 'ssh-privatekey'
+# that contains the ssh key to connect to the hosts
+# (see examples/README.md for more details)
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: certificate-check
+spec:
+ parallelism: 1
+ completions: 1
+ template:
+ metadata:
+ name: certificate-check
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/origin-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/easy-mode-upload.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ restartPolicy: Never
diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml
new file mode 100644
index 000000000..dd0a89c8e
--- /dev/null
+++ b/examples/certificate-check-volume.yaml
@@ -0,0 +1,60 @@
+# An example Job to run a certificate check of OpenShift's internal
+# certificate status from within OpenShift.
+#
+# The generated reports are stored in a Persistent Volume using
+# the playbook 'html_and_json_timestamp.yaml'.
+#
+# This example uses the openshift/origin-ansible container image.
+# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
+#
+# The following objects are expected to be configured before the creation
+# of this Job:
+# - A ConfigMap named 'inventory' with a key named 'hosts' that
+# contains the Ansible inventory file
+# - A Secret named 'sshkey' with a key named 'ssh-privatekey'
+# that contains the ssh key to connect to the hosts
+# - A PersistentVolumeClaim named 'certcheck-reports' where the
+# generated reports are going to be stored
+# (see examples/README.md for more details)
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: certificate-check
+spec:
+ parallelism: 1
+ completions: 1
+ template:
+ metadata:
+ name: certificate-check
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/origin-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ - name: reports
+ mountPath: /var/lib/certcheck
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ - name: reports
+ persistentVolumeClaim:
+ claimName: certcheck-reports
+ restartPolicy: Never
diff --git a/examples/scheduled-certcheck-upload.yaml b/examples/scheduled-certcheck-upload.yaml
new file mode 100644
index 000000000..05890a357
--- /dev/null
+++ b/examples/scheduled-certcheck-upload.yaml
@@ -0,0 +1,53 @@
+# An example ScheduledJob to run a regular check of OpenShift's internal
+# certificate status.
+#
+# Each job will upload new reports to a directory in the master hosts
+#
+# The Job specification is the same as 'certificate-check-upload.yaml'
+# and the expected pre-configuration is equivalent.
+# See that Job example and examples/README.md for more details.
+#
+# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At
+# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob"
+# and once the API stabilizes the apiVersion will have to be updated too.
+---
+apiVersion: batch/v2alpha1
+kind: ScheduledJob
+metadata:
+ name: certificate-check
+ labels:
+ app: certcheck
+spec:
+ schedule: "0 0 1 * *" # every 1st day of the month at midnight
+ jobTemplate:
+ metadata:
+ labels:
+ app: certcheck
+ spec:
+ template:
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/origin-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/easy-mode-upload.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ restartPolicy: Never
diff --git a/examples/scheduled-certcheck-volume.yaml b/examples/scheduled-certcheck-volume.yaml
new file mode 100644
index 000000000..2f26e8809
--- /dev/null
+++ b/examples/scheduled-certcheck-volume.yaml
@@ -0,0 +1,58 @@
+# An example ScheduledJob to run a regular check of OpenShift's internal
+# certificate status.
+#
+# Each job will add a new pair of reports to the configured Persistent Volume
+#
+# The Job specification is the same as 'certificate-check-volume.yaml'
+# and the expected pre-configuration is equivalent.
+# See that Job example and examples/README.md for more details.
+#
+# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At
+# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob"
+# and once the API stabilizes the apiVersion will have to be updated too.
+---
+apiVersion: batch/v2alpha1
+kind: ScheduledJob
+metadata:
+ name: certificate-check
+ labels:
+ app: certcheck
+spec:
+ schedule: "0 0 1 * *" # every 1st day of the month at midnight
+ jobTemplate:
+ metadata:
+ labels:
+ app: certcheck
+ spec:
+ template:
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/origin-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ - name: reports
+ mountPath: /var/lib/certcheck
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ - name: reports
+ persistentVolumeClaim:
+ claimName: certcheck-reports
+ restartPolicy: Never
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index a619f9ccb..36a90a870 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
+# pylint: disable=too-many-lines
"""
Custom filters for use in openshift-ansible
"""
@@ -10,6 +10,7 @@ import pdb
import random
import re
+from base64 import b64encode
from collections import Mapping
# pylint no-name-in-module and import-error disabled here because pylint
# fails to properly detect the packages when installed in a virtualenv
@@ -21,13 +22,16 @@ import pkg_resources
import yaml
from ansible import errors
-# pylint no-name-in-module and import-error disabled here because pylint
-# fails to properly detect the packages when installed in a virtualenv
-from ansible.compat.six import string_types # pylint:disable=no-name-in-module,import-error
-from ansible.compat.six.moves.urllib.parse import urlparse # pylint:disable=no-name-in-module,import-error
-from ansible.module_utils._text import to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
+# ansible.compat.six goes away with Ansible 2.4
+try:
+ from ansible.compat.six import string_types, u
+ from ansible.compat.six.moves.urllib.parse import urlparse
+except ImportError:
+ from ansible.module_utils.six import string_types, u
+ from ansible.module_utils.six.moves.urllib.parse import urlparse
+
HAS_OPENSSL = False
try:
import OpenSSL.crypto
@@ -125,34 +129,57 @@ def oo_merge_hostvars(hostvars, variables, inventory_hostname):
return merged_hostvars
-def oo_collect(data, attribute=None, filters=None):
+def oo_collect(data_list, attribute=None, filters=None):
""" This takes a list of dict and collects all attributes specified into a
list. If filter is specified then we will include all items that
match _ALL_ of filters. If a dict entry is missing the key in a
filter it will be excluded from the match.
- Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'z': 'z'}, # True, return
- {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
- ]
+ Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
+ ]
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
+
+ This also deals with lists of lists with dict as elements.
+ Ex: data_list = [
+ [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'b':6, 'z': 'z'} # True, return
+ ],
+ [ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'} # FAILED, obj['z'] != obj['z']
+ ],
+ {'a':5, 'z': 'z'}, # True, return
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3, 5]
"""
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
+ if not isinstance(data_list, list):
+ raise errors.AnsibleFilterError("oo_collect expects to filter on a List")
if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ raise errors.AnsibleFilterError("oo_collect expects attribute to be set")
+
+ data = []
+ retval = []
+
+ for item in data_list:
+ if isinstance(item, list):
+ retval.extend(oo_collect(item, attribute, filters))
+ else:
+ data.append(item)
if filters is not None:
if not isinstance(filters, dict):
- raise errors.AnsibleFilterError("|failed expects filter to be a"
- " dict")
- retval = [get_attr(d, attribute) for d in data if (
- all([d.get(key, None) == filters[key] for key in filters]))]
+ raise errors.AnsibleFilterError(
+ "oo_collect expects filter to be a dict")
+ retval.extend([get_attr(d, attribute) for d in data if (
+ all([d.get(key, None) == filters[key] for key in filters]))])
else:
- retval = [get_attr(d, attribute) for d in data]
+ retval.extend([get_attr(d, attribute) for d in data])
retval = [val for val in retval if val is not None]
@@ -167,10 +194,10 @@ def oo_select_keys_from_list(data, keys):
"""
if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
+ raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects to filter on a list")
if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [oo_select_keys(item, keys) for item in data]
@@ -186,10 +213,10 @@ def oo_select_keys(data, keys):
"""
if not isinstance(data, Mapping):
- raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
+ raise errors.AnsibleFilterError("|oo_select_keys failed expects to filter on a dict or object")
if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ raise errors.AnsibleFilterError("|oo_select_keys failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [data[key] for key in keys if key in data]
@@ -645,8 +672,7 @@ def oo_generate_secret(num_bytes):
if not isinstance(num_bytes, int):
raise errors.AnsibleFilterError("|failed expects num_bytes is int")
- secret = os.urandom(num_bytes)
- return secret.encode('base-64').strip()
+ return b64encode(os.urandom(num_bytes)).decode('utf-8')
def to_padded_yaml(data, level=0, indent=2, **kw):
@@ -655,11 +681,11 @@ def to_padded_yaml(data, level=0, indent=2, **kw):
return ""
try:
- transformed = yaml.dump(data, indent=indent, allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper, **kw)
+ transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper, **kw))
padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
- return to_text("\n{0}".format(padded))
+ return "\n{0}".format(padded)
except Exception as my_e:
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
@@ -689,7 +715,7 @@ def oo_openshift_env(hostvars):
return facts
-# pylint: disable=too-many-branches, too-many-nested-blocks
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
@@ -721,10 +747,15 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
volume = params['volume']['name']
path = directory + '/' + volume
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
nfs=dict(
@@ -734,18 +765,45 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
elif kind == 'openstack':
volume = params['volume']['name']
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
filesystem = params['openstack']['filesystem']
volume_id = params['openstack']['volumeID']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
cinder=dict(
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+ persistent_volumes.append(persistent_volume)
elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
@@ -951,6 +1009,21 @@ def oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS
return ''.join(random.choice(source) for i in range(length))
+def oo_contains_rule(source, apiGroups, resources, verbs):
+ '''Return true if the specified rule is contained within the provided source'''
+
+ rules = source['rules']
+
+ if rules:
+ for rule in rules:
+ if set(rule['apiGroups']) == set(apiGroups):
+ if set(rule['resources']) == set(resources):
+ if set(rule['verbs']) == set(verbs):
+ return True
+
+ return False
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -991,5 +1064,6 @@ class FilterModule(object):
"oo_openshift_loadbalancer_frontends": oo_openshift_loadbalancer_frontends,
"oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
"to_padded_yaml": to_padded_yaml,
- "oo_random_word": oo_random_word
+ "oo_random_word": oo_random_word,
+ "oo_contains_rule": oo_contains_rule
}
diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py
index 8c7302052..cad95ea6d 100644
--- a/filter_plugins/openshift_node.py
+++ b/filter_plugins/openshift_node.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-node
'''
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py
index 1403e9dcc..809e82488 100644
--- a/filter_plugins/openshift_version.py
+++ b/filter_plugins/openshift_version.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
-
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom version comparison filters for use in openshift-ansible
"""
diff --git a/hack/build-images.sh b/hack/build-images.sh
index f6210e239..6e6d360bf 100755
--- a/hack/build-images.sh
+++ b/hack/build-images.sh
@@ -7,10 +7,10 @@ set -o pipefail
STARTTIME=$(date +%s)
source_root=$(dirname "${0}")/..
-prefix="openshift/openshift-ansible"
+prefix="openshift/origin-ansible"
version="latest"
verbose=false
-options=""
+options="-f images/installer/Dockerfile"
help=false
for args in "$@"
@@ -44,10 +44,10 @@ if [ "$help" = true ]; then
echo "Options: "
echo " --prefix=PREFIX"
echo " The prefix to use for the image names."
- echo " default: openshift/openshift-ansible"
+ echo " default: openshift/origin-ansible"
echo
echo " --version=VERSION"
- echo " The version used to tag the image"
+ echo " The version used to tag the image (can be a comma-separated list)"
echo " default: latest"
echo
echo " --no-cache"
@@ -62,25 +62,33 @@ if [ "$help" = true ]; then
exit 0
fi
+
if [ "$verbose" = true ]; then
set -x
fi
BUILD_STARTTIME=$(date +%s)
comp_path=$source_root/
-docker_tag=${prefix}:${version}
+
+# turn comma-separated versions into -t args for docker build
+IFS=',' read -r -a version_arr <<< "$version"
+docker_tags=()
+for tag in "${version_arr[@]}"; do
+ docker_tags+=("-t" "${prefix}:${tag}")
+done
+
echo
echo
-echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---"
-docker build ${options} -t $docker_tag $comp_path
-BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
+echo "--- Building component '$comp_path' with docker tag(s) '$version' ---"
+docker build ${options} "${docker_tags[@]}" $comp_path
+BUILD_ENDTIME=$(date +%s); echo "--- ${version} took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
echo
echo
echo
echo
echo "++ Active images"
-docker images | grep ${prefix} | grep ${version} | sort
+docker images | grep ${prefix} | sort
echo
diff --git a/hack/hooks/README.md b/hack/hooks/README.md
new file mode 100644
index 000000000..ef870540a
--- /dev/null
+++ b/hack/hooks/README.md
@@ -0,0 +1,37 @@
+# OpenShift-Ansible Git Hooks
+
+## Introduction
+
+This `hack` sub-directory holds
+[git commit hooks](https://www.atlassian.com/git/tutorials/git-hooks#conceptual-overview)
+you may use when working on openshift-ansible contributions. See the
+README in each sub-directory for an overview of what each hook does
+and if the hook has any specific usage or setup instructions.
+
+## Usage
+
+Basic git hook usage is simple:
+
+1) Copy (or symbolic link) the hook to the `$REPO_ROOT/.git/hooks/` directory
+2) Make the hook executable (`chmod +x $PATH_TO_HOOK`)
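+
+For example, to install the `verify_generated_modules` pre-commit hook, run the
+following from the repository root (copying shown; a symbolic link works too):
+
+    cp hack/hooks/verify_generated_modules/pre-commit .git/hooks/pre-commit
+    chmod +x .git/hooks/pre-commit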
+
+## Multiple Hooks of the Same Type
+
+If you want to install multiple hooks of the same type, for example:
+multiple `pre-commit` hooks, you will need some kind of *hook
+dispatcher*. For an example of an easy to use hook dispatcher check
+out this gist by carlos-jenkins:
+
+* [multihooks.py](https://gist.github.com/carlos-jenkins/89da9dcf9e0d528ac978311938aade43)
+
+## Contributing Hooks
+
+If you want to contribute a new hook there are only a few criteria
+that must be met:
+
+* The hook **MUST** include a README describing the purpose of the hook
+* The README **MUST** describe special setup instructions if they are required
+* The hook **MUST** be in a sub-directory of this directory
+* The hook file **MUST** be named following the standard git hook
+ naming pattern (i.e., pre-commit hooks **MUST** be called
+ `pre-commit`)
diff --git a/hack/hooks/verify_generated_modules/README.md b/hack/hooks/verify_generated_modules/README.md
new file mode 100644
index 000000000..093fcf76a
--- /dev/null
+++ b/hack/hooks/verify_generated_modules/README.md
@@ -0,0 +1,19 @@
+# Verify Generated Modules
+
+Pre-commit hook for verifying that generated library modules match
+their EXPECTED content. Library modules are generated from fragments
+under the `roles/lib_(openshift|utils)/src/` directories.
+
+If the attempted commit modified files under the
+`roles/lib_(openshift|utils)/` directories this script will run the
+`generate.py --verify` command.
+
+This script will **NOT RUN** if module source fragments are modified
+but *not part of the commit*. I.e., you can still make commits if you
+modified module fragments AND other files but are *not committing the
+module fragments*.
+
+# Setup Instructions
+
+Standard installation procedure. Copy the hook to the `.git/hooks/`
+directory and ensure it is executable.
diff --git a/hack/hooks/verify_generated_modules/pre-commit b/hack/hooks/verify_generated_modules/pre-commit
new file mode 100755
index 000000000..8a319fd7e
--- /dev/null
+++ b/hack/hooks/verify_generated_modules/pre-commit
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+######################################################################
+# Pre-commit hook for verifying that generated library modules match
+# their EXPECTED content. Library modules are generated from fragments
+# under the 'roles/lib_(openshift|utils)/src/' directories.
+#
+# If the attempted commit modified files under the
+# 'roles/lib_(openshift|utils)/' directories this script will run the
+# 'generate.py --verify' command.
+#
+# This script will NOT RUN if module source fragments are modified but
+# not part of the commit. I.e., you can still make commits if you
+# modified module fragments AND other files but are not committing the
+# module fragments.
+
+# Did the commit modify any source module files?
+CHANGES=`git diff-index --stat --cached HEAD | grep -E '^ roles/lib_(openshift|utils)/src/(class|doc|ansible|lib)/'`
+RET_CODE=$?
+ABORT=0
+
+if [ "${RET_CODE}" -eq "0" ]; then
+ # Modifications detected. Run the verification scripts.
+
+ # Which was it?
+ if $(echo $CHANGES | grep -q 'roles/lib_openshift/'); then
+ echo "Validating lib_openshift..."
+ ./roles/lib_openshift/src/generate.py --verify
+ if [ "${?}" -ne "0" ]; then
+ ABORT=1
+ fi
+ fi
+
+ if $(echo $CHANGES | grep -q 'roles/lib_utils/'); then
+ echo "Validating lib_utils..."
+ ./roles/lib_utils/src/generate.py --verify
+ if [ "${?}" -ne "0" ]; then
+ ABORT=1
+ fi
+ fi
+
+ if [ "${ABORT}" -eq "1" ]; then
+ cat <<EOF
+
+ERROR: Module verification failed. Generated files do not match fragments.
+
+Choices to continue:
+ 1) Run './roles/lib_(openshift|utils)/src/generate.py' from the root of
+ the repo to regenerate the files
+ 2) Skip verification with '--no-verify' option to 'git commit'
+EOF
+ fi
+fi
+
+exit $ABORT
diff --git a/hack/push-release.sh b/hack/push-release.sh
index 8639143af..1f41ab179 100755
--- a/hack/push-release.sh
+++ b/hack/push-release.sh
@@ -1,55 +1,41 @@
#!/bin/bash
-# This script pushes all of the built images to a registry.
+# This script pushes a built image to a registry.
#
-# Set OS_PUSH_BASE_REGISTRY to prefix the destination images
+# Set OS_PUSH_BASE_REGISTRY to prefix the destination images e.g.
+# OS_PUSH_BASE_REGISTRY="docker.io/"
#
+# Set OS_PUSH_TAG with a comma-separated list for pushing same image
+# to multiple tags e.g.
+# OS_PUSH_TAG="latest,v3.6"
set -o errexit
set -o nounset
set -o pipefail
-STARTTIME=$(date +%s)
-OS_ROOT=$(dirname "${BASH_SOURCE}")/..
+starttime=$(date +%s)
-PREFIX="${PREFIX:-openshift/openshift-ansible}"
+# image name without repo or tag.
+image="${PREFIX:-openshift/origin-ansible}"
-# Go to the top of the tree.
-cd "${OS_ROOT}"
+# existing local tag on the image we want to push
+source_tag="${OS_TAG:-latest}"
-# Allow a release to be repushed with a tag
-tag="${OS_PUSH_TAG:-}"
-if [[ -n "${tag}" ]]; then
- tag=":${tag}"
-else
- tag=":latest"
-fi
-
-# Source tag
-source_tag="${OS_TAG:-}"
-if [[ -z "${source_tag}" ]]; then
- source_tag="latest"
-fi
-
-images=(
- ${PREFIX}
-)
+# Enable retagging a build with one or more tags for push
+IFS=',' read -r -a push_tags <<< "${OS_PUSH_TAG:-latest}"
+registry="${OS_PUSH_BASE_REGISTRY:-}"
+# force push if available
PUSH_OPTS=""
if docker push --help | grep -q force; then
PUSH_OPTS="--force"
fi
-if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then
- set -e
- for image in "${images[@]}"; do
- docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
- done
- set +e
-fi
-
-for image in "${images[@]}"; do
- docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
+set -x
+for tag in "${push_tags[@]}"; do
+ docker tag "${image}:${source_tag}" "${registry}${image}:${tag}"
+ docker push ${PUSH_OPTS} "${registry}${image}:${tag}"
done
+set +x
-ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
+endtime=$(date +%s); echo "$0 took $(($endtime - $starttime)) seconds"; exit 0
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
new file mode 100644
index 000000000..d03f33a1d
--- /dev/null
+++ b/images/installer/Dockerfile
@@ -0,0 +1,43 @@
+FROM centos:7
+
+MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
+
+USER root
+
+# install ansible and deps
+RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless httpd-tools openssh-clients" \
+ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
+ && EPEL_PKGS="ansible python-passlib python2-boto" \
+ && yum install -y epel-release \
+ && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
+ && rpm -q $INSTALL_PKGS $EPEL_PKGS \
+ && yum clean all
+
+LABEL name="openshift/origin-ansible" \
+ summary="OpenShift's installation and configuration tool" \
+ description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ url="https://github.com/openshift/openshift-ansible" \
+ io.k8s.display-name="openshift-ansible" \
+ io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ io.openshift.expose-services="" \
+ io.openshift.tags="openshift,install,upgrade,ansible" \
+ atomic.run="once"
+
+ENV USER_UID=1001 \
+ HOME=/opt/app-root/src \
+ WORK_DIR=/usr/share/ansible/openshift-ansible \
+ OPTS="-v"
+
+# Add image scripts and files for running as a system container
+COPY images/installer/root /
+# Include playbooks, roles, plugins, etc. from this repo
+COPY . ${WORK_DIR}
+
+RUN /usr/local/bin/user_setup \
+ && rm /usr/local/bin/usage.ocp
+
+USER ${USER_UID}
+
+WORKDIR ${WORK_DIR}
+ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
+CMD [ "/usr/local/bin/run" ]
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
new file mode 100644
index 000000000..3110f409c
--- /dev/null
+++ b/images/installer/Dockerfile.rhel7
@@ -0,0 +1,46 @@
+FROM rhel7.3:7.3-released
+
+MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
+
+USER root
+
+# Playbooks, roles, and their dependencies are installed from packages.
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \
+ && yum repolist > /dev/null \
+ && yum-config-manager --enable rhel-7-server-ose-3.6-rpms \
+ && yum-config-manager --enable rhel-7-server-rh-common-rpms \
+ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
+ && rpm -q $INSTALL_PKGS \
+ && yum clean all
+
+LABEL name="openshift3/ose-ansible" \
+ summary="OpenShift's installation and configuration tool" \
+ description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ url="https://github.com/openshift/openshift-ansible" \
+ io.k8s.display-name="openshift-ansible" \
+ io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ io.openshift.expose-services="" \
+ io.openshift.tags="openshift,install,upgrade,ansible" \
+ com.redhat.component="aos3-installation-docker" \
+ version="v3.6.0" \
+ release="1" \
+ architecture="x86_64" \
+ atomic.run="once"
+
+ENV USER_UID=1001 \
+ HOME=/opt/app-root/src \
+ WORK_DIR=/usr/share/ansible/openshift-ansible \
+ ANSIBLE_CONFIG=/usr/share/atomic-openshift-utils/ansible.cfg \
+ OPTS="-v"
+
+# Add image scripts and files for running as a system container
+COPY root /
+
+RUN /usr/local/bin/user_setup \
+ && mv /usr/local/bin/usage{.ocp,}
+
+USER ${USER_UID}
+
+WORKDIR ${WORK_DIR}
+ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
+CMD [ "/usr/local/bin/run" ]
diff --git a/images/installer/README_CONTAINER_IMAGE.md b/images/installer/README_CONTAINER_IMAGE.md
new file mode 100644
index 000000000..bc1ebb4a8
--- /dev/null
+++ b/images/installer/README_CONTAINER_IMAGE.md
@@ -0,0 +1,48 @@
+ORIGIN-ANSIBLE IMAGE INSTALLER
+===============================
+
+Contains Dockerfile information for building an openshift/origin-ansible image
+based on `centos:7` or `rhel7.3:7.3-released`.
+
+Read additional setup information for this image at: https://hub.docker.com/r/openshift/origin-ansible/
+
+Read additional information about the `openshift/origin-ansible` image at: https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+Also contains necessary components for running the installer using an Atomic System Container.
+
+
+System container installer
+==========================
+
+These files are needed to run the installer using an [Atomic System container](http://www.projectatomic.io/blog/2016/09/intro-to-system-containers/).
+These files can be found under `root/exports`:
+
+* config.json.template - Template of the configuration file used for running containers.
+
+* manifest.json - Used to define various settings for the system container, such as the default values to use for the installation.
+
+* service.template - Template file for the systemd service.
+
+* tmpfiles.template - Template file for systemd-tmpfiles.
+
+These files can be found under `root/usr/local/bin`:
+
+* run-system-container.sh - Entrypoint to the container.
+
+## Options
+
+These options may be set via the ``atomic`` ``--set`` flag. For defaults, see ``root/exports/manifest.json``.
+
+* OPTS - Additional options to pass to ansible when running the installer
+
+* VAR_LIB_OPENSHIFT_INSTALLER - Full path of the installer code to mount into the container
+
+* VAR_LOG_OPENSHIFT_LOG - Full path of the log file to mount into the container
+
+* PLAYBOOK_FILE - Full path of the playbook inside the container
+
+* HOME_ROOT - Full path on host to mount as the root home directory inside the container (for .ssh/, etc.)
+
+* ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container
+
+* INVENTORY_FILE - Full path for the inventory to use from the host
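+
+As a usage sketch, a system container installation might set a few of these
+options as follows (the image name, paths, and values are illustrative):
+
+    atomic install --system \
+        --set INVENTORY_FILE=/root/inventory \
+        --set PLAYBOOK_FILE=/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml \
+        openshift/origin-ansible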
diff --git a/images/installer/root/exports/config.json.template b/images/installer/root/exports/config.json.template
new file mode 100644
index 000000000..739c0080f
--- /dev/null
+++ b/images/installer/root/exports/config.json.template
@@ -0,0 +1,234 @@
+{
+ "ociVersion": "1.0.0",
+ "platform": {
+ "os": "linux",
+ "arch": "amd64"
+ },
+ "process": {
+ "terminal": false,
+ "consoleSize": {
+ "height": 0,
+ "width": 0
+ },
+ "user": {
+ "uid": 0,
+ "gid": 0
+ },
+ "args": [
+ "/usr/local/bin/run-system-container.sh"
+ ],
+ "env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "TERM=xterm",
+ "OPTS=$OPTS",
+ "PLAYBOOK_FILE=$PLAYBOOK_FILE",
+ "ANSIBLE_CONFIG=$ANSIBLE_CONFIG"
+ ],
+ "cwd": "/opt/app-root/src/",
+ "rlimits": [
+ {
+ "type": "RLIMIT_NOFILE",
+ "hard": 1024,
+ "soft": 1024
+ }
+ ],
+ "noNewPrivileges": true
+ },
+ "root": {
+ "path": "rootfs",
+ "readonly": true
+ },
+ "mounts": [
+ {
+ "destination": "/proc",
+ "type": "proc",
+ "source": "proc"
+ },
+ {
+ "destination": "/dev",
+ "type": "tmpfs",
+ "source": "tmpfs",
+ "options": [
+ "nosuid",
+ "strictatime",
+ "mode=755",
+ "size=65536k"
+ ]
+ },
+ {
+ "destination": "/dev/pts",
+ "type": "devpts",
+ "source": "devpts",
+ "options": [
+ "nosuid",
+ "noexec",
+ "newinstance",
+ "ptmxmode=0666",
+ "mode=0620",
+ "gid=5"
+ ]
+ },
+ {
+ "destination": "/dev/shm",
+ "type": "tmpfs",
+ "source": "shm",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev",
+ "mode=1777",
+ "size=65536k"
+ ]
+ },
+ {
+ "destination": "/dev/mqueue",
+ "type": "mqueue",
+ "source": "mqueue",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev"
+ ]
+ },
+ {
+ "destination": "/sys",
+ "type": "sysfs",
+ "source": "sysfs",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev",
+ "ro"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$HOME_ROOT/.ssh",
+ "destination": "/opt/app-root/src/.ssh",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$HOME_ROOT",
+ "destination": "/root",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$VAR_LIB_OPENSHIFT_INSTALLER",
+ "destination": "/var/lib/openshift-installer",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$VAR_LOG_OPENSHIFT_LOG",
+ "destination": "/var/log/ansible.log",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "destination": "/root/.ansible",
+ "type": "tmpfs",
+ "source": "tmpfs",
+ "options": [
+ "nosuid",
+ "strictatime",
+ "mode=755"
+ ]
+ },
+ {
+ "destination": "/tmp",
+ "type": "tmpfs",
+ "source": "tmpfs",
+ "options": [
+ "nosuid",
+ "strictatime",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$INVENTORY_FILE",
+ "destination": "/etc/ansible/hosts",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "destination": "/etc/resolv.conf",
+ "type": "bind",
+ "source": "/etc/resolv.conf",
+ "options": [
+ "ro",
+ "rbind",
+ "rprivate"
+ ]
+ },
+ {
+ "destination": "/sys/fs/cgroup",
+ "type": "cgroup",
+ "source": "cgroup",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev",
+ "relatime",
+ "ro"
+ ]
+ }
+ ],
+ "hooks": {
+
+ },
+ "linux": {
+ "resources": {
+ "devices": [
+ {
+ "allow": false,
+ "access": "rwm"
+ }
+ ]
+ },
+ "namespaces": [
+ {
+ "type": "pid"
+ },
+ {
+ "type": "mount"
+ }
+ ],
+ "maskedPaths": [
+ "/proc/kcore",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/sys/firmware"
+ ],
+ "readonlyPaths": [
+ "/proc/asound",
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger"
+ ]
+ }
+}
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
new file mode 100644
index 000000000..8b984d7a3
--- /dev/null
+++ b/images/installer/root/exports/manifest.json
@@ -0,0 +1,12 @@
+{
+ "version": "1.0",
+ "defaultValues": {
+ "OPTS": "",
+ "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
+ "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
+ "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+ "HOME_ROOT": "/root",
+ "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
+ "INVENTORY_FILE": "/dev/null"
+ }
+}
diff --git a/images/installer/root/exports/service.template b/images/installer/root/exports/service.template
new file mode 100644
index 000000000..bf5316af6
--- /dev/null
+++ b/images/installer/root/exports/service.template
@@ -0,0 +1,6 @@
+[Service]
+ExecStart=$EXEC_START
+ExecStop=-$EXEC_STOP
+Restart=no
+WorkingDirectory=$DESTDIR
+Type=oneshot
diff --git a/images/installer/root/exports/tmpfiles.template b/images/installer/root/exports/tmpfiles.template
new file mode 100644
index 000000000..b1f6caf47
--- /dev/null
+++ b/images/installer/root/exports/tmpfiles.template
@@ -0,0 +1,2 @@
+d $VAR_LIB_OPENSHIFT_INSTALLER - - - - -
+f $VAR_LOG_OPENSHIFT_LOG - - - - -
diff --git a/images/installer/root/usr/local/bin/entrypoint b/images/installer/root/usr/local/bin/entrypoint
new file mode 100755
index 000000000..777bf3f11
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+#
+# This file serves as the main entrypoint to the openshift-ansible image.
+#
+# For more information see the documentation:
+# https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+ echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+exec "$@"
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
new file mode 100755
index 000000000..9401ea118
--- /dev/null
+++ b/images/installer/root/usr/local/bin/run
@@ -0,0 +1,46 @@
+#!/bin/bash -e
+#
+# This file serves as the default command to the openshift-ansible image.
+# Runs a playbook with inventory as specified by environment variables.
+#
+# For more information see the documentation:
+# https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+# SOURCE and HOME DIRECTORY: /opt/app-root/src
+
+if [[ -z "${PLAYBOOK_FILE}" ]]; then
+ echo
+ echo "PLAYBOOK_FILE must be provided."
+ exec /usr/local/bin/usage
+fi
+
+INVENTORY="$(mktemp)"
+if [[ -v INVENTORY_FILE ]]; then
+ # Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
+ # does not attempt to modify the original
+ cp -a ${INVENTORY_FILE} ${INVENTORY}
+elif [[ -v INVENTORY_URL ]]; then
+ curl -o ${INVENTORY} ${INVENTORY_URL}
+elif [[ -v DYNAMIC_SCRIPT_URL ]]; then
+ curl -o ${INVENTORY} ${DYNAMIC_SCRIPT_URL}
+ chmod 755 ${INVENTORY}
+else
+ echo
+ echo "One of INVENTORY_FILE, INVENTORY_URL or DYNAMIC_SCRIPT_URL must be provided."
+ exec /usr/local/bin/usage
+fi
+INVENTORY_ARG="-i ${INVENTORY}"
+
+if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then
+ sed -i s/ansible_connection=local// ${INVENTORY}
+fi
+
+if [[ -v VAULT_PASS ]]; then
+ VAULT_PASS_FILE=.vaultpass
+ echo ${VAULT_PASS} > ${VAULT_PASS_FILE}
+ VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}"
+fi
+
+cd ${WORK_DIR}
+
+exec ansible-playbook ${INVENTORY_ARG} ${VAULT_PASS_ARG} ${OPTS} ${PLAYBOOK_FILE}
diff --git a/images/installer/root/usr/local/bin/run-system-container.sh b/images/installer/root/usr/local/bin/run-system-container.sh
new file mode 100755
index 000000000..9ce7c7328
--- /dev/null
+++ b/images/installer/root/usr/local/bin/run-system-container.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+export ANSIBLE_LOG_PATH=/var/log/ansible.log
+exec ansible-playbook -i /etc/ansible/hosts ${OPTS} ${PLAYBOOK_FILE}
diff --git a/images/installer/root/usr/local/bin/usage b/images/installer/root/usr/local/bin/usage
new file mode 100755
index 000000000..3518d7f19
--- /dev/null
+++ b/images/installer/root/usr/local/bin/usage
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+cat <<"EOF"
+
+The origin-ansible image provides several options to control the behaviour of the containers.
+For more details on these options see the documentation:
+
+ https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+At a minimum, when running a container using this image you must provide:
+
+* ssh keys so that Ansible can reach your hosts. These should be mounted as a volume under
+ /opt/app-root/src/.ssh
+* An inventory file. This can be mounted inside the container as a volume and specified with the
+ INVENTORY_FILE environment variable. Alternatively you can serve the inventory file from a web
+ server and use the INVENTORY_URL environment variable to fetch it.
+* The playbook to run. This is set using the PLAYBOOK_FILE environment variable.
+
+Here is an example of how to run a containerized origin-ansible with
+the openshift_facts playbook, which collects and displays facts about your
+OpenShift environment. The inventory and ssh keys are mounted as volumes
+(the latter requires setting the uid in the container and the SELinux label
+on the key file via :Z so they can be accessed) and the PLAYBOOK_FILE
+environment variable is set to point to the playbook within the image:
+
+docker run -tu `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+ -v /etc/ansible/hosts:/tmp/inventory:Z,ro \
+ -e INVENTORY_FILE=/tmp/inventory \
+ -e OPTS="-v" \
+ -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ openshift/origin-ansible
+
+EOF
diff --git a/images/installer/root/usr/local/bin/usage.ocp b/images/installer/root/usr/local/bin/usage.ocp
new file mode 100755
index 000000000..50593af6e
--- /dev/null
+++ b/images/installer/root/usr/local/bin/usage.ocp
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+cat <<"EOF"
+
+The ose-ansible image provides several options to control the behaviour of the containers.
+For more details on these options see the documentation:
+
+ https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+At a minimum, when running a container using this image you must provide:
+
+* ssh keys so that Ansible can reach your hosts. These should be mounted as a volume under
+ /opt/app-root/src/.ssh
+* An inventory file. This can be mounted inside the container as a volume and specified with the
+ INVENTORY_FILE environment variable. Alternatively you can serve the inventory file from a web
+ server and use the INVENTORY_URL environment variable to fetch it.
+* The playbook to run. This is set using the PLAYBOOK_FILE environment variable.
+
+Here is an example of how to run a containerized ose-ansible with
+the openshift_facts playbook, which collects and displays facts about your
+OpenShift environment. The inventory and ssh keys are mounted as volumes
+(the latter requires setting the uid in the container and the SELinux label
+on the key file via :Z so they can be accessed) and the PLAYBOOK_FILE
+environment variable is set to point to the playbook within the image:
+
+docker run -tu `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+ -v /etc/ansible/hosts:/tmp/inventory:Z,ro \
+ -e INVENTORY_FILE=/tmp/inventory \
+ -e OPTS="-v" \
+ -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ openshift3/ose-ansible
+
+EOF
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
new file mode 100755
index 000000000..b76e60a4d
--- /dev/null
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -x
+
+# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
+mkdir -p ${HOME}
+chown ${USER_UID}:0 ${HOME}
+chmod ug+rwx ${HOME}
+
+# runtime user will need to be able to self-insert in /etc/passwd
+chmod g+rw /etc/passwd
+
+# ensure that the ansible content is accessible
+chmod -R g+r ${WORK_DIR}
+find ${WORK_DIR} -type d -exec chmod g+x {} +
+
+# no need for this script to remain in the image after running
+rm $0
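
A quick, hedged way to check the permissions that user_setup arranges for an arbitrary runtime UID in group 0; the image name comes from the usage text above and the UID is illustrative:

docker run --rm -u 123456:0 openshift/origin-ansible \
  sh -c 'id && ls -ld /opt/app-root/src && ls -l /etc/passwd'
# /opt/app-root/src should be group-writable and /etc/passwd should carry g+rw,
# which is what lets the entrypoint append the runtime user's entry.
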
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
new file mode 100644
index 000000000..5a284ce97
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -0,0 +1,56 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with storage provided by an external GlusterFS cluster.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster
+openshift_storage_glusterfs_is_native=False
+# Specify the IP address or hostname of the external heketi service
+openshift_storage_glusterfs_heketi_url=172.0.0.1
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the hostname of the external GlusterFS node,
+# and must be reachable by the external heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
+# formatted.
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
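
As the header comments note, this inventory is consumed by the playbooks under playbooks/byo/openshift-glusterfs/. A minimal sketch of using it against an existing cluster; in practice you would copy the example to a real inventory first, and the paths below are illustrative:

cp inventory/byo/hosts.byo.glusterfs.external.example /etc/ansible/glusterfs-hosts
ansible-playbook -i /etc/ansible/glusterfs-hosts \
  playbooks/byo/openshift-glusterfs/config.yml
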
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
new file mode 100644
index 000000000..d16df6470
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -0,0 +1,59 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with storage provided by an external GlusterFS cluster and a natively hosted heketi service.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster and a native
+# heketi service
+openshift_storage_glusterfs_is_native=False
+openshift_storage_glusterfs_heketi_is_native=True
+# Specify that heketi will use SSH to communicate to the GlusterFS nodes and
+# the private key file it will use for authentication
+openshift_storage_glusterfs_heketi_executor=ssh
+openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the hostname of the external GlusterFS node,
+# and must be reachable by the external heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
+# formatted.
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
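
Because this inventory sets openshift_storage_glusterfs_heketi_executor=ssh, the private key named by openshift_storage_glusterfs_heketi_ssh_keyfile must exist and be authorized on each GlusterFS node. A hedged sketch, with hostnames taken from the example; where the key ultimately needs to live is described in roles/openshift_storage_glusterfs/README.md:

ssh-keygen -t rsa -N '' -f /root/id_rsa
for node in node0.local node1.local node2.local; do
  ssh-copy-id -i /root/id_rsa.pub root@"$node"
done
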
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
new file mode 100644
index 000000000..c1a1f6f84
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -0,0 +1,46 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for applications. It
+# will also automatically create a StorageClass for this purpose.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that are intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
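
Since this deployment also creates a StorageClass, applications can request GlusterFS-backed volumes through an ordinary PersistentVolumeClaim. A hedged sketch; the class name used below is an assumption, so check "oc get storageclass" for the actual name after the playbook run:

oc get storageclass
cat <<'EOF' | oc create -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-claim
  annotations:
    # the class name below is an assumption; substitute the real one
    volume.beta.kubernetes.io/storage-class: glusterfs-storage
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
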
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
new file mode 100644
index 000000000..31a85ee42
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -0,0 +1,52 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for exclusive use
+# as storage for a natively hosted Docker registry.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that are intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs_registry]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
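
As the header comments note, this inventory can also be used with the registry.yml playbook against an existing cluster. A minimal sketch (the inventory path is illustrative), followed by a generic check for the resulting volume; per the comments, the volume still has to be attached to a hosted registry afterwards:

ansible-playbook -i inventory/byo/hosts.byo.glusterfs.registry-only.example \
  playbooks/byo/openshift-glusterfs/registry.yml
oc get pv    # the registry backend volume should show up here
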
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
new file mode 100644
index 000000000..54bd89ddc
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -0,0 +1,63 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for both general
+# application use and a natively hosted Docker registry. It will also create a
+# StorageClass for the general storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+# It is recommended not to use a single cluster for both general and registry
+# storage, so two three-node clusters are required.
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that are intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+
+[glusterfs_registry]
+node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index d61f033f8..de7493f71 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -10,6 +10,10 @@ nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
+# Enable unsupported configurations, things that will yield a partially
+# functioning cluster but would not be supported for production use
+#openshift_enable_unsupported_configurations=false
+
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
@@ -22,7 +26,7 @@ ansible_ssh_user=root
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
-# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
+# Specify the deployment type. Valid values are origin and openshift-enterprise.
openshift_deployment_type=origin
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
@@ -30,17 +34,28 @@ openshift_deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v1.5
+openshift_release=v3.6
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v1.5.0
+#openshift_image_tag=v3.6.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-1.5.0
+#openshift_pkg_version=-3.6.0
+
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# Alternatively, you can choose individually which components run as
+# system containers:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
# Install the openshift examples
#openshift_install_examples=true
@@ -78,6 +93,18 @@ openshift_release=v1.5
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
+# Use Docker inside a System Container. Note that this is a tech preview and should
+# not be used for upgrades!
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used:
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Force which registry to use for the system container image. By default the
+# registry is derived from the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
@@ -86,6 +113,11 @@ openshift_release=v1.5
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -115,13 +147,18 @@ openshift_release=v1.5
# Alternate image format string, useful if you've got your own registry mirror
+# Configure these settings to override the image format only on nodes or only on masters
+#oreg_url_master=example.com/openshift3/ose-${component}:${version}
+#oreg_url_node=example.com/openshift3/ose-${component}:${version}
+# For setting the configuration globally
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-# Origin copr repo
+# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+#openshift_repos_enable_testing=false
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
@@ -175,6 +212,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than store
@@ -313,8 +357,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
-# Disable management of the OpenShift Router
-#openshift_hosted_manage_router=false
+# Manage the OpenShift Router
+#openshift_hosted_manage_router=true
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
@@ -333,7 +377,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# selector: type=router1
# images: "openshift3/ose-${component}:${version}"
# edits: []
-# certificates:
+# certificate:
# certfile: /path/to/certificate/abc.crt
# keyfile: /path/to/certificate/abc.key
# cafile: /path/to/certificate/ca.crt
@@ -347,7 +391,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# serviceaccount: router
# selector: type=router2
# images: "openshift3/ose-${component}:${version}"
-# certificates:
+# certificate:
# certfile: /path/to/certificate/xyz.crt
# keyfile: /path/to/certificate/xyz.key
# cafile: /path/to/certificate/ca.crt
@@ -391,8 +435,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
-# Disable management of the OpenShift Registry
-#openshift_hosted_manage_registry=false
+# Manage the OpenShift Registry
+#openshift_hosted_manage_registry=true
# Registry Storage Options
#
@@ -430,6 +474,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -485,6 +531,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -496,6 +543,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -509,6 +557,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Configure the prefix and version for the component images
+#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
+#openshift_hosted_metrics_deployer_version=3.6.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': false}
+#
# Logging deployment
#
@@ -526,6 +582,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -537,6 +594,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -556,7 +614,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=1.5.0
+#openshift_hosted_logging_deployer_version=3.6.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -568,10 +626,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
+#
+# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
+# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
-
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
@@ -662,7 +727,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -688,6 +753,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -695,7 +764,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
+# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -732,6 +805,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+# Enable the template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
@@ -751,19 +828,70 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
# Enable API service auditing, available as of 1.3
-#openshift_master_audit_config={"basicAuditEnabled": true}
+#openshift_master_audit_config={"enabled": true}
+#
+# If you want a more advanced setup for the audit log you can
+# use this line instead.
+# The directory in "auditFilePath" will be created if it does not
+# exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
# by deployment_type=origin
#openshift_enable_origin_repo=false
-# Validity of the auto-generated certificates in days.
+# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
#openshift_ca_cert_expire_days=1825
#openshift_node_cert_expire_days=730
#openshift_master_cert_expire_days=730
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
+
+# Upgrade Control
+#
+# By default nodes are upgraded one at a time (serially) and all failures
+# are fatal. There is one set of variables for normal nodes and another for
+# nodes that are part of the control plane, since the number of hosts may
+# differ between those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes, the upgrade will
+# stall until the drain succeeds.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failure within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run; you should
+# investigate the failure and return those nodes to service so that your
+# cluster regains its full capacity.
+#
+# The failure percentage must exceed this value; for example, this would abort on two failures
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during the upgrade and, if they fail,
+# they will fail the upgrade. You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
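
One detail in the hunk above that is easy to miss: the SDN warning suggests working around an overlapping Docker bridge subnet by adding '--bip=...' to DOCKER_NETWORK_OPTIONS. On each host that would look roughly like the following sketch, using the example CIDR from the comment:

# /etc/sysconfig/docker-network
DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'

# restart docker so the new bridge address takes effect
systemctl restart docker
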
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 823d6f58f..62a364e0d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -10,6 +10,10 @@ nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
+# Enable unsupported configurations, things that will yield a partially
+# functioning cluster but would not be supported for production use
+#openshift_enable_unsupported_configurations=false
+
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
@@ -22,7 +26,7 @@ ansible_ssh_user=root
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
-# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
+# Specify the deployment type. Valid values are origin and openshift-enterprise.
openshift_deployment_type=openshift-enterprise
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
@@ -30,17 +34,28 @@ openshift_deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.5
+openshift_release=v3.6
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.5.0
+#openshift_image_tag=v3.6.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.5.0
+#openshift_pkg_version=-3.6.0
+
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# Alternatively, you can choose individually which components run as
+# system containers:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
# Install the openshift examples
#openshift_install_examples=true
@@ -78,10 +93,27 @@ openshift_release=v3.5
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
+# Use Docker inside a System Container. Note that this is a tech preview and should
+# not be used for upgrades!
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used:
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Force which registry to use for the system container image. By default the
+# registry is derived from the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
@@ -113,15 +145,19 @@ openshift_release=v3.5
# Tasks to run after each master is upgraded and system/services have been restarted.
# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
# Alternate image format string, useful if you've got your own registry mirror
+# Configure these settings to override the image format only on nodes or only on masters
+#oreg_url_master=example.com/openshift3/ose-${component}:${version}
+#oreg_url_node=example.com/openshift3/ose-${component}:${version}
+# For setting the configuration globally
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-# Additional yum repos to install
+# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_repos_enable_testing=false
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
@@ -175,6 +211,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than store
@@ -313,8 +356,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
-# Disable management of the OpenShift Router
-#openshift_hosted_manage_router=false
+# Manage the OpenShift Router (optional)
+#openshift_hosted_manage_router=true
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
@@ -333,7 +376,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# selector: type=router1
# images: "openshift3/ose-${component}:${version}"
# edits: []
-# certificates:
+# certificate:
# certfile: /path/to/certificate/abc.crt
# keyfile: /path/to/certificate/abc.key
# cafile: /path/to/certificate/ca.crt
@@ -347,7 +390,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# serviceaccount: router
# selector: type=router2
# images: "openshift3/ose-${component}:${version}"
-# certificates:
+# certificate:
# certfile: /path/to/certificate/xyz.crt
# keyfile: /path/to/certificate/xyz.key
# cafile: /path/to/certificate/ca.crt
@@ -391,8 +434,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
-# Disable management of the OpenShift Registry
-#openshift_hosted_manage_registry=false
+# Manage the OpenShift Registry (optional)
+#openshift_hosted_manage_registry=true
# Registry Storage Options
#
@@ -431,6 +474,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -486,6 +531,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -497,6 +543,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -510,6 +557,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Configure the prefix and version for the component images
+#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_metrics_deployer_version=3.6.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': false}
+#
# Logging deployment
#
@@ -527,6 +582,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -538,6 +594,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -557,7 +614,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.5.0
+#openshift_hosted_logging_deployer_version=3.6.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -569,10 +626,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
+#
+# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
+# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
-
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
@@ -663,7 +727,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -689,6 +753,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -696,7 +764,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
+# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -733,6 +805,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+# Enable the template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
@@ -752,15 +828,66 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
# Enable API service auditing, available as of 3.2
-#openshift_master_audit_config={"basicAuditEnabled": true}
+#openshift_master_audit_config={"enabled": true}
+#
+# If you want a more advanced setup for the audit log you can
+# use this line instead.
+# The directory in "auditFilePath" will be created if it does not
+# exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
-# Validity of the auto-generated certificates in days.
+# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
#openshift_ca_cert_expire_days=1825
#openshift_node_cert_expire_days=730
#openshift_master_cert_expire_days=730
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
+
+# Upgrade Control
+#
+# By default nodes are upgraded one at a time (serially) and all failures
+# are fatal. There is one set of variables for normal nodes and another for
+# nodes that are part of the control plane, since the number of hosts may
+# differ between those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes, the upgrade will
+# stall until the drain succeeds.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failure within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run; you should
+# investigate the failure and return those nodes to service so that your
+# cluster regains its full capacity.
+#
+# The failure percentage must exceed this value; for example, this would abort on two failures
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during the upgrade and, if they fail,
+# they will fail the upgrade. You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
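
The upgrade-control comments in the hunk above state that the batch failure percentage must exceed openshift_upgrade_nodes_max_fail_percentage before the upgrade aborts. A small sketch of the arithmetic behind the two examples given, with values taken directly from the comments:

serial=4; failed=2
pct=$(( failed * 100 / serial ))   # 2 of 4 nodes -> 50
for threshold in 49 50; do
  if [ "$pct" -gt "$threshold" ]; then
    echo "max_fail_percentage=$threshold: two failed nodes abort the upgrade"
  else
    echo "max_fail_percentage=$threshold: two failed nodes are tolerated"
  fi
done
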
diff --git a/library/kubeclient_ca.py b/library/kubeclient_ca.py
index 163624a76..a89a5574f 100644
--- a/library/kubeclient_ca.py
+++ b/library/kubeclient_ca.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
''' kubeclient_ca ansible module '''
import base64
diff --git a/library/modify_yaml.py b/library/modify_yaml.py
index 8706e80c2..9b8f9ba33 100755
--- a/library/modify_yaml.py
+++ b/library/modify_yaml.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
''' modify_yaml ansible module '''
import yaml
diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py
index 7909d0092..4581cb6b8 100644
--- a/lookup_plugins/oo_option.py
+++ b/lookup_plugins/oo_option.py
@@ -1,7 +1,5 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
'''
oo_option lookup plugin for openshift-ansible
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7ffe69a79..9cadf5947 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.14
+Version: 3.7.1
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -17,7 +17,7 @@ URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 2.2.0.0-1
+Requires: ansible >= 2.2.2.0
Requires: python2
Requires: python-six
Requires: tar
@@ -25,6 +25,7 @@ Requires: openshift-ansible-docs = %{version}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
+Requires: python-passlib
%description
Openshift and Atomic Enterprise Ansible
@@ -76,6 +77,9 @@ find -L %{buildroot}%{_datadir}/ansible/%{name}/playbooks -name filter_plugins -
cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
# remove contiv role
rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
+# touch a file in contiv so that the directory can be added to SCMs
+touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
+
# openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins
pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
@@ -157,23 +161,29 @@ BuildArch: noarch
%files playbooks
%{_datadir}/ansible/%{name}/playbooks
-# We moved playbooks/common/openshift-master/library up to the top and replaced
-# it with a symlink. RPM doesn't handle this so we have to do some pre-transaction
-# magic. See https://fedoraproject.org/wiki/Packaging:Directory_Replacement
+# Along the history of openshift-ansible, some playbook directories had to be
+# moved and were replaced with symlinks for backwards compatibility.
+# RPM doesn't handle this so we have to do some pre-transaction magic.
+# See https://fedoraproject.org/wiki/Packaging:Directory_Replacement
%pretrans playbooks -p <lua>
--- Define the path to directory being replaced below.
+-- Define the paths to directories being replaced below.
-- DO NOT add a trailing slash at the end.
-path = "/usr/share/ansible/openshift-ansible/playbooks/common/openshift-master/library"
-st = posix.stat(path)
-if st and st.type == "directory" then
- status = os.rename(path, path .. ".rpmmoved")
- if not status then
- suffix = 0
- while not status do
- suffix = suffix + 1
- status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+dirs_to_sym = {
+ "/usr/share/ansible/openshift-ansible/playbooks/common/openshift-master/library",
+ "/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry"
+}
+for i,path in ipairs(dirs_to_sym) do
+ st = posix.stat(path)
+ if st and st.type == "directory" then
+ status = os.rename(path, path .. ".rpmmoved")
+ if not status then
+ suffix = 0
+ while not status do
+ suffix = suffix + 1
+ status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+ end
+ os.rename(path, path .. ".rpmmoved")
end
- os.rename(path, path .. ".rpmmoved")
end
end
@@ -270,6 +280,1134 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jul 27 2017 Scott Dodson <sdodson@redhat.com> 3.7.1-1
+- Fix incorrect delegate_to in control plane upgrade (sdodson@redhat.com)
+- Follow the new naming conventions. (zhang.wanmin@zte.com.cn)
+- Simplify generation of /etc/origin/node/resolv.conf (sdodson@redhat.com)
+- Add glusterfs hosts to oo_all_hosts so that hosts set initial facts.
+ (abutcher@redhat.com)
+- Sync all openshift.common.use_openshift_sdn uses in yaml files
+ (jchaloup@redhat.com)
+- Fixing podpresets perms for service-catalog-controller (ewolinet@redhat.com)
+- Fixing route spec caCertificate to be correctly capitalized
+ (ewolinet@redhat.com)
+- Set TimeoutStartSec=300 (sdodson@redhat.com)
+- Revert "set KillMode to process in node service file" (sdodson@redhat.com)
+- openshift_checks: refactor to internalize task_vars (lmeyer@redhat.com)
+- openshift_checks: get rid of deprecated module_executor (lmeyer@redhat.com)
+- openshift_checks: improve comments/names (lmeyer@redhat.com)
+- add default value for router path in the cert (efreiber@redhat.com)
+- Router wildcard certificate created by default (efreiber@redhat.com)
+- Remove unsupported parameters from example inventory files.
+ (jarrpa@redhat.com)
+- Fix lint errors (sdodson@redhat.com)
+- Metrics: grant hawkular namespace listener role (mwringe@redhat.com)
+- Removing nolog from htpasswd invocation so as not to suppress errors
+ (ewolinet@redhat.com)
+- Removed kubernetes.io string from default. (kwoodson@redhat.com)
+- Allow storage migrations to be optional and/or non-fatal (sdodson@redhat.com)
+- libvirt: fall back to mkisofs if genisoimage isn't available
+ (dcbw@redhat.com)
+- libvirt: add documentation about SSH keypair requirements (dcbw@redhat.com)
+- Updating how storage type is determined, adding bool filter in
+ openshift_logging_elasticsearch (ewolinet@redhat.com)
+- Pass the provisioner to the module. (kwoodson@redhat.com)
+- Use absolute path when unexcluding (Sergi Jimenez)
+- Fixes https://bugzilla.redhat.com/show_bug.cgi?id=1474246 (Sergi Jimenez)
+- Support enabling the centos-openshift-origin-testing repository
+ (dms@redhat.com)
+- 1472467- add ose- prefix to ansible service broker name (fabian@fabianism.us)
+- Updating openshift_logging_kibana default for kibana hostname
+ (ewolinet@redhat.com)
+- GlusterFS: Create registry storage svc and ep in registry namespace
+ (jarrpa@redhat.com)
+- Default an empty list for etcd_to_config if not there (tbielawa@redhat.com)
+- If proxy in effect, add etcd host IP addresses to NO_PROXY list on masters
+ (tbielawa@redhat.com)
+- GlusterFS: Pass all booleans through bool filter. (jarrpa@redhat.com)
+- GlusterFS: Fix bug in detecting whether to open firewall ports.
+ (jarrpa@redhat.com)
+- Pass first master's openshift_image_tag to openshift_loadbalancer for
+ containerized haproxy installation. (abutcher@redhat.com)
+- verify sane log times in logging stack (jvallejo@redhat.com)
+- Fix log dumping on service failure (sdodson@redhat.com)
+- Updating verbs for serviceclasses objects (ewolinet@redhat.com)
+- Fix broken link to Docker image instructions (rhcarvalho@gmail.com)
+- Added parameters inside of gce defaults. Pass all params to the module.
+ (kwoodson@redhat.com)
+- add etcd increased-traffic check (jvallejo@redhat.com)
+- Add etcd exports to openshift_storage_nfs (abutcher@redhat.com)
+- Hopefully finally fix the no_proxy settings (tbielawa@redhat.com)
+- openshift_checks/docker_storage: overlay/2 support (lmeyer@redhat.com)
+- Removing parameter kind and allowing default to be passed.
+ (kwoodson@redhat.com)
+- Remove openshift_use_dnsmasq from aws and libvirt playbooks
+ (sdodson@redhat.com)
+- 1471973- default to bootstrapping the broker on startup (fabian@fabianism.us)
+- image builds: remove dependency on playbook2image (jvallejo@redhat.com)
+- Setting node selector to be empty string (ewolinet@redhat.com)
+- Add drain retries after 60 second delay (sdodson@redhat.com)
+- Dump some logs (sdodson@redhat.com)
+- daemon_reload on node and ovs start (sdodson@redhat.com)
+- Ensure proper fact evaluation (sdodson@redhat.com)
+- Wrap additional service changes in retries (sdodson@redhat.com)
+- Wrap docker stop in retries (sdodson@redhat.com)
+- Add retries to node restart handlers (sdodson@redhat.com)
+- Test docker restart with retries 3 delay 30 (smilner@redhat.com)
+- Adding podpreset config into master-config (ewolinet@redhat.com)
+- Update image-gc-high-threshold value (decarr@redhat.com)
+- Adding a check for variable definition. (kwoodson@redhat.com)
+- docker: fix docker_selinux_enabled (lmeyer@redhat.com)
+- Changing cluster role to admin (rhallise@redhat.com)
+- drain still pending in the files below without the fix (jkaur@redhat.com)
+- Fixed spacing and lint errors. (kwoodson@redhat.com)
+- Switch CI to ansible-2.3.1.0 (sdodson@redhat.com)
+- Allow OVS 2.7 in latest OpenShift releases (rhcarvalho@gmail.com)
+- Make aos_version module handle multiple versions (rhcarvalho@gmail.com)
+- Split positive and negative unit tests (rhcarvalho@gmail.com)
+- GlusterFS: Create in custom namespace by default (jarrpa@redhat.com)
+- hosted registry: Use proper node name in GlusterFS storage setup
+ (jarrpa@redhat.com)
+- GlusterFS: Make heketi-cli command configurable (jarrpa@redhat.com)
+- GlusterFS: Reintroduce heketi-cli check for non-native heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Bug fixes for external GlusterFS nodes (jarrpa@redhat.com)
+- GlusterFS: Improve and extend example inventory files (jarrpa@redhat.com)
+- Fixed tests and added sleep for update. (kwoodson@redhat.com)
+- Fixing needs_update comparison. Added a small pause for race conditions.
+ Fixed doc. Fix kind to storageclass (kwoodson@redhat.com)
+- Adding storageclass support to lib_openshift. (kwoodson@redhat.com)
+- Add an SA policy to the ansible-service-broker (rhallise@redhat.com)
+- Import templates will fail if user is not system:admin (jkaur@redhat.com)
+- Additional optimization parameters for ansible.cfg (sejug@redhat.com)
+- Fix etcd conditional check failure (admin@webresource.nl)
+- Remove invalid when: from vars: (rteague@redhat.com)
+
+* Tue Jul 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.153-1
+- Updating to compare sets instead of sorted lists (ewolinet@redhat.com)
+- Adding ability to create podpreset for service-catalog-controller for
+ bz1471881 (ewolinet@redhat.com)
+- Updating to use oc replace and conditionally update edit and admin roles
+ (ewolinet@redhat.com)
+- Other playbooks may be expecting this to be at least an empty string. I think
+  they default it to an empty list if it's not found. (tbielawa@redhat.com)
+- Fix NO_PROXY environment variable setting (tbielawa@redhat.com)
+- Changing the passing of data for sc creation. (kwoodson@redhat.com)
+- Fixed variable name. (kwoodson@redhat.com)
+- Adding disk encryption to storageclasses and to openshift registry
+ (kwoodson@redhat.com)
+
+* Mon Jul 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.152-1
+-
+
+* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.151-1
+-
+
+* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.150-1
+-
+
+* Sat Jul 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.149-1
+- Config was missed before replace. (jkaur@redhat.com)
+- Redeploy-certificates will fail for registry and router if user is not
+ system:admin (jkaur@redhat.com)
+
+* Fri Jul 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.148-1
+- Adding in permissions to edit and admin cluster roles (ewolinet@redhat.com)
+- making kube-service-catalog project network global when using
+  redhat/openshift-ovs-multitenant plugin (ewolinet@redhat.com)
+- set KillMode to process in node service file (jchaloup@redhat.com)
+- Upgrade fails when "Drain Node for Kubelet upgrade" (jkaur@redhat.com)
+- openvswitch, syscontainer: specify the Docker service name
+ (gscrivan@redhat.com)
+
+* Thu Jul 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.144-1
+- Created js file for enabling tech preview for console, updated master-config
+ for pod presets and console tech preview (ewolinet@redhat.com)
+- GlusterFS: Add updated example hosts files (jarrpa@redhat.com)
+- GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com)
+
+* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.143-1
+-
+
+* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.142-1
+- add scheduled pods check (jvallejo@redhat.com)
+- Only store failures that were not ignored. (rhcarvalho@gmail.com)
+- Add overlay to supported Docker storage drivers (rhcarvalho@gmail.com)
+- ansible.cfg: improve ssh ControlPath (lmeyer@redhat.com)
+- openshift_checks: fix execute_module params (lmeyer@redhat.com)
+- OCP build: override python-directed envvars (lmeyer@redhat.com)
+- OCP build: fix bug 1465724 (lmeyer@redhat.com)
+- OCP build: sync packages needed (lmeyer@redhat.com)
+- Adding create permissions for serviceclasses.servicecatalog.k8s.io to
+ service-catalog-controller role (ewolinet@redhat.com)
+- Fix calico when certs are auto-generated (djosborne10@gmail.com)
+- Removing trailing newline. (kwoodson@redhat.com)
+- Error upgrading control_plane when user is not system:admin
+ (jkaur@redhat.com)
+- [Bz 1468113] Configure the rest of the masters with the correct URL.
+ (kwoodson@redhat.com)
+
+* Tue Jul 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.141-1
+- Add evaluate_groups.yml to network_manager playbook (rteague@redhat.com)
+- updating fetch tasks to be flat paths (ewolinet@redhat.com)
+
+* Mon Jul 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.140-1
+-
+
+* Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.139-1
+- increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com)
+
+* Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.138-1
+- Wait for etcd to become healthy before migrating TTL (tbielawa@redhat.com)
+- Use openshift.node.nodename as glusterfs_hostname. (abutcher@redhat.com)
+- container-engine: Update Fedora registry url (smilner@redhat.com)
+- updating configmap map definition to fix asb not starting up correctly
+ (ewolinet@redhat.com)
+- xPaas v1.4.1 for 3.4 (sdodson@redhat.com)
+- xPaas v1.4.1 for 3.5 (sdodson@redhat.com)
+- xPaaS 1.4.1 for 3.6 (sdodson@redhat.com)
+- Only add entries to NO_PROXY settings if a NO_PROXY value is set
+ (tbielawa@redhat.com)
+- fixing configuration values. (shurley@redhat.com)
+
+* Fri Jul 07 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.137-1
+- Install container-selinux with container-engine (smilner@redhat.com)
+- Bug 1466152 - Json-file log driver: Neither
+ "openshift_logging_fluentd_use_journal=false" nor omitted collects the log
+ entries (rmeggins@redhat.com)
+- Adding serial: 1 to play to ensure we run one at a time (ewolinet@redhat.com)
+- Fix yamllint (sdodson@redhat.com)
+- Workaround seboolean module with setsebool command. (abutcher@redhat.com)
+- Removed quotes and added env variable to be specific. (kwoodson@redhat.com)
+- [BZ 1467786] Fix for OPENSHIFT_DEFAULT_REGISTRY setting.
+ (kwoodson@redhat.com)
+- set the proper label of /var/lib/etcd directory (jchaloup@redhat.com)
+
+* Thu Jul 06 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.136-1
+- Synching certs and aggregator configs from first master to all other masters
+ (ewolinet@redhat.com)
+- Addressing servicecatalog doesn't have enough permissions and multimaster
+ config for service-catalog (ewolinet@redhat.com)
+- add back mux_client config that was removed (rmeggins@redhat.com)
+- use master etcd certificates when delegating oadm migrate etcd-ttl
+ (jchaloup@redhat.com)
+
+* Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.135-1
+- Update the tag for enterprise service catalog (sdodson@redhat.com)
+- Fix missing service domain .svc in NO_PROXY settings (tbielawa@redhat.com)
+- drop etcdctl before the etcd_container service (jchaloup@redhat.com)
+- Fix prefix for OCP service-catalog prefix (sdodson@redhat.com)
+- Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com)
+
+* Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.134-1
+-
+
+* Tue Jul 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.133-1
+- etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com)
+- pre-pull images before stopping docker (jchaloup@redhat.com)
+- Always convert no_proxy from string into a list (sdodson@redhat.com)
+- fix 1466680. Fix logging deploying to the specified namespace
+ (jcantril@redhat.com)
+- logging_es: temporarily disable readiness probe (jwozniak@redhat.com)
+- Fixes to storage migration (sdodson@redhat.com)
+
+* Mon Jul 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.132-1
+-
+
+* Sun Jul 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.131-1
+- Fix upgrade (sdodson@redhat.com)
+- Prevent the script from using the default route IP as upstream nameserver.
+ (steveteuber@users.noreply.github.com)
+- Use default ports for dnsmasq and node dns (sdodson@redhat.com)
+- Run dns on the node and use that for dnsmasq (sdodson@redhat.com)
+- Using ca-bundle.crt to connect to local etcd if master.etcd-ca.crt DNE
+ (ewolinet@redhat.com)
+- Set OPENSHIFT_DEFAULT_REGISTRY in registry dc. (abutcher@redhat.com)
+- Updating to use openshift.master.etcd_hosts for etcd servers for apiserver
+ (ewolinet@redhat.com)
+- Update v1.4 image streams and templates (sdodson@redhat.com)
+- xPaaS v1.4.0 for v3.4 (sdodson@redhat.com)
+- Sync latest image streams and templates for v1.5 (sdodson@redhat.com)
+- xPaaS v1.4.0 for v3.5 (sdodson@redhat.com)
+- Update latest image streams for v3.6 (sdodson@redhat.com)
+- Bump xPaas v1.4.0 for v3.6 (sdodson@redhat.com)
+- docker_image_availability: fix containerized etcd (lmeyer@redhat.com)
+- evaluate etcd backup directory name only once (jchaloup@redhat.com)
+- run etcd_container with type:spc_t label (jchaloup@redhat.com)
+- Fixing ops storage options being passed to openshift_logging_elasticsearch
+ role fixing default ops pv selector (ewolinet@redhat.com)
+- Adding labels for elasticsearch and kibana services (ewolinet@redhat.com)
+- Add a retry to the docker restart handler (sdodson@redhat.com)
+- docker_storage check: make vgs return sane output (lmeyer@redhat.com)
+- Capture exceptions when resolving available checks (rhcarvalho@gmail.com)
+- PAPR: customize disk space requirements (rhcarvalho@gmail.com)
+- Enable disk check on containerized installs (rhcarvalho@gmail.com)
+- Add module docstring (rhcarvalho@gmail.com)
+- Add suggestion to check disk space in any path (rhcarvalho@gmail.com)
+- Require at least 1GB in /usr/bin/local and tempdir (rhcarvalho@gmail.com)
+- Refactor DiskAvailability for arbitrary paths (rhcarvalho@gmail.com)
+- Adding some more sections to additional considerations, being less rigid on
+ large roles for composing -- can also be a playbook (ewolinet@redhat.com)
+- Updating snippet contents, formatting and providing urls
+ (ewolinet@redhat.com)
+- Update snippets and add bullet point on role dependency (ewolinet@redhat.com)
+- Creating initial proposal doc for review (ewolinet@redhat.com)
+
+* Fri Jun 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.129-1
+- Fix generate role binding destination for the HOSA service account
+ (steveteuber@users.noreply.github.com)
+- Correct version comparisons to ensure proper evaluation (rteague@redhat.com)
+- Adding become: false to local_action tasks (ewolinet@redhat.com)
+- upgrade: fix name for the etcd system container (gscrivan@redhat.com)
+- fix backup and working directory for etcd run as a system container
+ (jchaloup@redhat.com)
+- etcd_migrate: Add /var/usrlocal/bin to path for oadm (smilner@redhat.com)
+- etcd_migrate: Add /usr/local/bin to path for oadm (smilner@redhat.com)
+- Sync environment variables FLUENTD/MUX_CPU_LIMIT FLUENTD/MUX_MEMORY_LIMIT
+ with the resource limit values. (nhosoi@redhat.com)
+- Update master configuration for named certificates during master cert
+ redeploy. (abutcher@redhat.com)
+- Get rid of openshift_facts dep in rhel_subscribe (sdodson@redhat.com)
+- logging: write ES heap dump to persistent storage (jwozniak@redhat.com)
+
+* Thu Jun 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.128-1
+- parameterize etcd binary path (fabian@fabianism.us)
+- attach leases via the first master only and only once (jchaloup@redhat.com)
+- evaluate groups when running etcd upgrade from byo/openshift-
+ cluster/upgrades/upgrade_etcd.yml (jchaloup@redhat.com)
+- Bug 1465168 - mux doesn't recognize ansible boolean parameters correctly
+ (rmeggins@redhat.com)
+
+* Tue Jun 27 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1003-1
+- Generate loopback kubeconfig separately to preserve OpenShift CA certificate.
+ (abutcher@redhat.com)
+- registry: look for the oc executable in /usr/local/bin and ~/bin
+ (gscrivan@redhat.com)
+- router: look for the oc executable in /usr/local/bin and ~/bin
+ (gscrivan@redhat.com)
+- Retry docker startup once (sdodson@redhat.com)
+
+* Tue Jun 27 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1002-1
+- Fix typo in fluentd_secureforward_contents variable
+ (Andreas.Dembach@dg-i.net)
+- Reverting quotation change in ansible_service_broker install for etcd
+ (ewolinet@redhat.com)
+
+* Mon Jun 26 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1001-1
+- oc_atomic_container: use rpm to check the version. (gscrivan@redhat.com)
+- Fix .spec for stagecut (jupierce@redhat.com)
+- Picking change from sdodson (ewolinet@redhat.com)
+- openshift_version: skip nfs and lb hosts (smilner@redhat.com)
+- openshift_checks: eval groups before including role (lmeyer@redhat.com)
+- Adding volume fact for etcd for openshift ansible service broker
+ (ewolinet@redhat.com)
+- Updating to label node and wait for apiservice to be healthy and started
+ (ewolinet@redhat.com)
+- Also configure default registry on HA masters (sdodson@redhat.com)
+- Fix parsing certs with very large serial numbers (tbielawa@redhat.com)
+- fix yamllint issues (fabian@fabianism.us)
+- openshift_logging: use empty default for storage labels (fsimonce@redhat.com)
+- Set clean install and etcd storage on first master to fix scaleup
+ (sdodson@redhat.com)
+- images, syscontainer: change default value for ANSIBLE_CONFIG
+ (gscrivan@redhat.com)
+- Cleanup/updates for env variables and etcd image (fabian@fabianism.us)
+- Sync 3.5 cfme templates over to 3.6 (sdodson@redhat.com)
+- Moving checks down after required initialization happens.
+ (kwoodson@redhat.com)
+- add play and role to install ansible-service-broker (fabian@fabianism.us)
+- Creation of service_catalog and placeholder broker roles
+ (ewolinet@redhat.com)
+- GlusterFS: Use proper namespace for heketi command and service account
+ (jarrpa@redhat.com)
+- Fixing quote issue. (kwoodson@redhat.com)
+- GlusterFS: Fix heketi secret name (jarrpa@redhat.com)
+- Fix for dynamic pvs when using storageclasses. (kwoodson@redhat.com)
+- Ensure that host pki tree is mounted in containerized components
+ (sdodson@redhat.com)
+
+* Fri Jun 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.123-1
+- releases: enable build/push with multiple tags (lmeyer@redhat.com)
+- Update template examples for 3.6 (rteague@redhat.com)
+- Reverting v prefix introduced by stagecut (smunilla@redhat.com)
+- Fixed readme doc. (kwoodson@redhat.com)
+- Adding version field for stagecut (smunilla@redhat.com)
+- Remove package_update from install playbook (rhcarvalho@gmail.com)
+- Restart NetworkManager only if dnsmasq was used
+ (bliemli@users.noreply.github.com)
+- remove extra close brace in example inventory (gpei@redhat.com)
+- Adding option for serviceAccountConfig.limitSecretReferences
+ (kwoodson@redhat.com)
+- doc: Add system_container examples to inventory (smilner@redhat.com)
+- system_containers: Add openshift_ to other system_container vars
+ (smilner@redhat.com)
+- system_containers: Add openshift_ to use_system_containers var
+ (smilner@redhat.com)
+- detect etcd service name based on etcd runtime when restarting
+ (jchaloup@redhat.com)
+- set proper etcd_data_dir for system container (jchaloup@redhat.com)
+- etcd, system_container: do not mask etcd_container (gscrivan@redhat.com)
+- etcd, system_container: do not enable system etcd (gscrivan@redhat.com)
+- oc_atomic_container: Require 1.17.2 (smilner@redhat.com)
+- Verify matched openshift_upgrade_nodes_label (rteague@redhat.com)
+- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
+
+* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
+-
+
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
+- Updating default from null to "" (ewolinet@redhat.com)
+
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.120-1
+- Update atomic-openshift-master.j2 (sdodson@redhat.com)
+- Enable push to registry via dns only on clean 3.6 installs
+ (sdodson@redhat.com)
+- Disable actually pushing to the registry via dns for now (sdodson@redhat.com)
+- Add openshift_node_dnsmasq role to upgrade (sdodson@redhat.com)
+- Push to the registry via dns (sdodson@redhat.com)
+
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.119-1
+- Temporarily only migrate jobs as we were before (sdodson@redhat.com)
+- Disable TLS verification in skopeo inspect (rhcarvalho@gmail.com)
+- Preserve etcd3 storage if it's already in use (sdodson@redhat.com)
+- GlusterFS: Generate better secret keys (jarrpa@redhat.com)
+- GlusterFS: Fix error when groups.glusterfs_registry is undefined.
+ (jarrpa@redhat.com)
+- GlusterFS: Use proper identity in heketi secret (jarrpa@redhat.com)
+- GlusterFS: Allow configuration of heketi port (jarrpa@redhat.com)
+- GlusterFS: Fix variable typo (jarrpa@redhat.com)
+- GlusterFS: Minor template fixes (jarrpa@redhat.com)
+- registry: mount GlusterFS storage volume from correct host
+ (jarrpa@redhat.com)
+
+* Mon Jun 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.117-1
+- Run storage upgrade pre and post master upgrade (rteague@redhat.com)
+- Introduce etcd migrate role (jchaloup@redhat.com)
+- Add support for rhel, aci, vxlan (srampal@cisco.com)
+
+* Sun Jun 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.116-1
+- PAPR: define openshift_image_tag via command line (rhcarvalho@gmail.com)
+- Ensure only one ES pod per PV (peter.portante@redhat.com)
+- etcd v3 for clean installs (sdodson@redhat.com)
+- Rename cockpit-shell -> cockpit-system (rhcarvalho@gmail.com)
+- Update image repo name, images have been moved from 'cloudforms' to
+ 'cloudforms42' for CF 4.2. (simaishi@redhat.com)
+- Update image repo name, images have been moved from 'cloudforms' to
+ 'cloudforms45' for CF 4.5. (simaishi@redhat.com)
+- CloudForms 4.5 templates (simaishi@redhat.com)
+
+* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1
+-
+
+* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1
+- Make rollout status check best-effort, add poll (skuznets@redhat.com)
+- Verify the rollout status of the hosted router and registry
+ (skuznets@redhat.com)
+- fix es routes for new logging roles (rmeggins@redhat.com)
+
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.112-1
+- Add the other featured audit-config parameters as an example (al-
+ git001@none.at)
+
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.111-1
+- doc: Info for system container installer options (smilner@redhat.com)
+- Add ANSIBLE_CONFIG to system container installer (smilner@redhat.com)
+- Add missing file. Remove debugging prompt. (tbielawa@redhat.com)
+- Update readme one last time (tbielawa@redhat.com)
+- Reconfigure masters in serial to avoid HA meltdowns (tbielawa@redhat.com)
+- First POC of a CFME turnkey solution in openshift-ansible
+ (tbielawa@redhat.com)
+- Reverted most of this pr 4356 except: adding
+ openshift_logging_fluentd_buffer_queue_limit: 1024
+ openshift_logging_fluentd_buffer_size_limit: 1m
+ openshift_logging_mux_buffer_queue_limit: 1024
+ openshift_logging_mux_buffer_size_limit: 1m and setting the matched
+ environment variables. (nhosoi@redhat.com)
+- Adding the defaults for openshift_logging_fluentd_{cpu,memory}_limit to
+ roles/openshift_logging_fluentd/defaults/main.yml. (nhosoi@redhat.com)
+- Adding environment variables FLUENTD_CPU_LIMIT, FLUENTD_MEMORY_LIMIT,
+ MUX_CPU_LIMIT, MUX_MEMORY_LIMIT. (nhosoi@redhat.com)
+- Introducing fluentd/mux buffer_queue_limit, buffer_size_limit, cpu_limit, and
+ memory_limit. (nhosoi@redhat.com)
+
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.110-1
+- papr: add documentation to YAML and simplify context (jlebon@redhat.com)
+- docs: better documentation for PAPR (jlebon@redhat.com)
+- papr: install libffi-devel (jlebon@redhat.com)
+- pre-install checks: add more during byo install (lmeyer@redhat.com)
+- move etcd backup to etcd_common role (jchaloup@redhat.com)
+- Support installing HOSA via ansible (mwringe@redhat.com)
+- GlusterFS: Remove requirement for heketi-cli (jarrpa@redhat.com)
+- GlusterFS: Fix bugs in wipe (jarrpa@redhat.com)
+- GlusterFS: Skip heketi-cli install on Atomic (jarrpa@redhat.com)
+- GlusterFS: Create a StorageClass if specified (jarrpa@redhat.com)
+- GlusterFS: Use proper secrets (jarrpa@redhat.com)
+- GlusterFS: Allow cleaner separation of multiple clusters (jarrpa@redhat.com)
+- GlusterFS: Minor corrections and cleanups (jarrpa@redhat.com)
+- GlusterFS: Improve documentation (jarrpa@redhat.com)
+- GlusterFS: Allow configuration of kube namespace for heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Adjust when clauses for registry config (jarrpa@redhat.com)
+- GlusterFS: Allow failure reporting when deleting deploy-heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Tweak pod probe parameters (jarrpa@redhat.com)
+- GlusterFS: Allow for configuration of node selector (jarrpa@redhat.com)
+- GlusterFS: Label on Openshift node name (jarrpa@redhat.com)
+- GlusterFS: Make sure timeout is an int (jarrpa@redhat.com)
+- GlusterFS: Use groups variables (jarrpa@redhat.com)
+- papr: rename redhat-ci related files to papr (jlebon@redhat.com)
+- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
+-
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
+- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.107-1
+- Disable negative caching, set cache TTL to 1s (skuznets@redhat.com)
+- Update mounts in system container installer (smilner@redhat.com)
+- Set ansible retry file location (smilner@redhat.com)
+- installer: add bind mount for /etc/resolv.conf (gscrivan@redhat.com)
+- Making pylint happy (ewolinet@redhat.com)
+- Fix possible access to undefined variable (rhcarvalho@gmail.com)
+- certificates: copy the certificates for the etcd system container
+ (gscrivan@redhat.com)
+- Separate etcd and OpenShift CA redeploy playbooks. (abutcher@redhat.com)
+- lib/base: allow for results parsing on non-zero return code
+ (jarrpa@redhat.com)
+- etcd: system container defines ETCD_(PEER_)?TRUSTED_CA_FILE
+ (gscrivan@redhat.com)
+- etcd: unmask system container service before installing it
+ (gscrivan@redhat.com)
+- etcd: copy previous database when migrating to system container
+ (gscrivan@redhat.com)
+- etcd: define data dir location for the system container (gscrivan@redhat.com)
+- oc_obj: set _delete() rc to 0 if err is 'not found' (jarrpa@redhat.com)
+- oc_obj: only check 'items' if exists in delete (jarrpa@redhat.com)
+- Removed hardcoded Calico Policy Controller URL (vincent.schwarzer@yahoo.de)
+- Allowing openshift_metrics to specify PV selectors and allow way to define
+ selectors when creating pv (ewolinet@redhat.com)
+
+* Tue Jun 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.100-1
+- Change default key for gce (hekumar@redhat.com)
+- set etcd working directory for embedded etcd (jchaloup@redhat.com)
+- Add daemon-reload handler to openshift_node and notify when /etc/systemd
+ files have been updated. (abutcher@redhat.com)
+- Use volume.beta.kubernetes.io annotation for storage-classes
+ (per.carlson@vegvesen.no)
+- Correct master-config update during upgrade (rteague@redhat.com)
+
+* Mon Jun 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.99-1
+- Replace repoquery with module (jchaloup@redhat.com)
+- Consider previous value of 'changed' when updating (rhcarvalho@gmail.com)
+- Improve code readability (rhcarvalho@gmail.com)
+- Disable excluder only on nodes that are not masters (jchaloup@redhat.com)
+- Added includes to specify openshift version for libvirt cluster create.
+ Otherwise bin/cluster create fails on unknown version for libvirt deployment.
+ (schulthess@puzzle.ch)
+- docker checks: finish and refactor (lmeyer@redhat.com)
+- oc_secret: allow use of force for secret type (jarrpa@redhat.com)
+- add docker storage, docker driver checks (jvallejo@redhat.com)
+- Add dependency and use same storageclass name as upstream
+ (hekumar@redhat.com)
+- Add documentation (hekumar@redhat.com)
+- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
+
+* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
+-
+
+* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
+- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
+- Updating kibana to store session and oauth secrets for reuse, fix oauthclient
+ generation for ops (ewolinet@redhat.com)
+
+* Thu Jun 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.5-1
+- Rename container image to origin-ansible / ose-ansible (pep@redhat.com)
+
+* Thu Jun 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.4-1
+- Guard check for container install based on openshift dictionary key
+ (ayoung@redhat.com)
+- Separate client config removal in uninstall s.t. ansible_ssh_user is removed
+ from with_items. (abutcher@redhat.com)
+- Remove supported/implemented barrier for registry object storage providers.
+ (abutcher@redhat.com)
+- Add node unit file on upgrade (smilner@redhat.com)
+- fix up openshift-ansible for use with 'oc cluster up' (jcantril@redhat.com)
+- specify all logging index mappings for kibana (jcantril@redhat.com)
+- openshift-master: set r_etcd_common_etcd_runtime (gscrivan@redhat.com)
+- rename daemon.json to container-daemon.json (smilner@redhat.com)
+- Updating probe timeout and exposing variable to adjust timeout in image
+ (ewolinet@redhat.com)
+- Do not attempt to override openstack nodename (jdetiber@redhat.com)
+- Update image stream to openshift/origin:2c55ade (skuznets@redhat.com)
+
+* Wed Jun 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.3-1
+- Use local openshift.master.loopback_url when generating initial master
+ loopback kubeconfigs. (abutcher@redhat.com)
+
+* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
+-
+
+* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
+- Updating image for registry_console (ewolinet@redhat.com)
+- add elasticseatch, fluentd, kibana check (jvallejo@redhat.com)
+- show correct default value in inventory (mmckinst@redhat.com)
+- Skip service restarts within ca redeployment playbook when expired
+ certificates are detected. (abutcher@redhat.com)
+- Add mtu setting to /etc/sysconfig/docker-network (sdodson@redhat.com)
+- Add daemon_reload parameter to service tasks (tbielawa@redhat.com)
+- mux uses fluentd cert/key to talk to ES (rmeggins@redhat.com)
+- fix curator host, port params; remove curator es volumes
+ (rmeggins@redhat.com)
+- add mux docs; allow to specify mux namespaces (rmeggins@redhat.com)
+- oc_secret: allow for specifying secret type (jarrpa@redhat.com)
+- Revert "Merge pull request #4271 from DG-i/master" (skuznets@redhat.com)
+- verify upgrade targets separately for each group (masters, nodes, etcd)
+ (jchaloup@redhat.com)
+- Updating Kibana-proxy secret key name, fixing deleting secrets, fixed extra
+ ES dc creation (ewolinet@redhat.com)
+- upgrade: Reload systemd before restart (smilner@redhat.com)
+- Skip router/registry cert redeploy when
+ openshift_hosted_manage_{router,registry}=false (abutcher@redhat.com)
+- disable docker excluder before it is updated to remove older excluded
+ packages (jchaloup@redhat.com)
+- Support byo etcd for calico (djosborne10@gmail.com)
+- preflight int tests: fix for package_version changes (lmeyer@redhat.com)
+- Remove unnecessary comment. (rhcarvalho@gmail.com)
+- update aos_version module to support generic pkgs and versions
+ (jvallejo@redhat.com)
+- Add separate variables for control plane nodes (sdodson@redhat.com)
+- Copy Nuage VSD generated user certificates to Openshift master nodes
+ (sneha.deshpande@nokia.com)
+- add existing_ovs_version check (jvallejo@redhat.com)
+- Tolerate failures in the node upgrade playbook (sdodson@redhat.com)
+
+* Wed May 31 2017 Scott Dodson <sdodson@redhat.com> 3.6.89.0-1
+- AMP 2.0 (sdodson@redhat.com)
+- add support for oc_service for labels, externalIPs (rmeggins@redhat.com)
+- [JMAN4-161] Add templates and pv example for cloudforms jboss middleware
+ manager (pgier@redhat.com)
+
+* Wed May 31 2017 Scott Dodson <sdodson@redhat.com> 3.6.89-1
+- Adding default value for openshift_hosted_logging_storage_kind
+ (ewolinet@redhat.com)
+- memory check: use GiB/MiB and adjust memtotal (lmeyer@redhat.com)
+- bool (sdodson@redhat.com)
+- Metrics: update the imagePullPolicy to be always (mwringe@redhat.com)
+- Remove typos that got reintroduced (smilner@redhat.com)
+- oc_atomic_container: Workaround for invalid json from atomic command
+ (smilner@redhat.com)
+- Remove system-package=no from container-engine install (smilner@redhat.com)
+- oc_atomic_container: Hard code system-package=no (smilner@redhat.com)
+- Updating to generate PVC when storage type is passed in as nfs
+ (ewolinet@redhat.com)
+- disable become for local actions (Mathias.Merscher@dg-i.net)
+- check for rpm version and docker image version equality only if
+ openshift_pkg_version and openshift_image_tag are not defined
+ (jchaloup@redhat.com)
+
+* Tue May 30 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.86-1
+- Reduce memory requirement to 2gb for fedora ci jobs (sdodson@redhat.com)
+- openshift_logging: increasing *_elasticsearch_* default CPU and memory
+ (jwozniak@redhat.com)
+- Updating python-passlib assert (ewolinet@redhat.com)
+- allow to configure oreg_url specifically for node or master. refs #4233
+ (tobias@tobru.ch)
+- Updating registry-console version to be v3.6 instead of 3.6
+ (ewolinet@redhat.com)
+
+* Thu May 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.85-1
+- Prepending v to registry-console version (ewolinet@redhat.com)
+- memory health check: adjust threshold for etcd (lmeyer@redhat.com)
+- health checks: specify check skip reason (lmeyer@redhat.com)
+- health checks: configure failure output in playbooks (lmeyer@redhat.com)
+- disk/memory checks: make threshold configurable (lmeyer@redhat.com)
+- Show help on how to disable checks after failure (rhcarvalho@gmail.com)
+- Allow disabling checks via Ansible variable (rhcarvalho@gmail.com)
+- Verify memory and disk requirements before install (rhcarvalho@gmail.com)
+- filter_plugins: Allow for multiple pairs in map_from_pairs()
+ (jarrpa@redhat.com)
+
+* Wed May 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.84-1
+- oc_process: Better error output on failed template() call (jarrpa@redhat.com)
+
+* Wed May 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.83-1
+- Allow a hostname to resolve to 127.0.0.1 during validation (dms@redhat.com)
+
+* Wed May 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.82-1
+- Fixing tox warnings and some final cleanup (ewolinet@redhat.com)
+- Appease travis (sdodson@redhat.com)
+- preflight int tests: fix test flake (lmeyer@redhat.com)
+- Add a readiness probe to the Kibana container (skuznets@redhat.com)
+- Create logging deployments with non-zero replica counts (skuznets@redhat.com)
+- Pulling changes from master branch (ewolinet@redhat.com)
+- Adding some missing changes (ewolinet@redhat.com)
+- fixing available variables for 2.3.0 (ewolinet@redhat.com)
+- Updating pvc generation names (ewolinet@redhat.com)
+- updating delete_logging to use modules (ewolinet@redhat.com)
+- Pulling in changes from master (ewolinet@redhat.com)
+- Decomposing openshift_logging role into subcomponent roles
+ (ewolinet@redhat.com)
+- Fix renaming error with calico template files (djosborne10@gmail.com)
+
+* Tue May 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.80-1
+- RPM workaround for the move of cert playbooks (pep@redhat.com)
+- health check playbooks: relocate and expand (lmeyer@redhat.com)
+
+* Tue May 23 2017 Scott Dodson <sdodson@redhat.com> 3.6.69-1
+- preflight int tests: fix for openshift_version dep (lmeyer@redhat.com)
+- Removing requirement to pass aws credentials (esauer@redhat.com)
+- Workaround sysctl module issue with py3 by converting task to lineinfile.
+ (abutcher@redhat.com)
+- inventory: rename certificates->certificate in router example
+ (smilner@redhat.com)
+- remove skopeo dependency on docker-py (jvallejo@redhat.com)
+- improve error handling for missing vars (jvallejo@redhat.com)
+- lib/base: Allow for more complex template params (jarrpa@redhat.com)
+- Fix yamllint problems (sdodson@redhat.com)
+- add ability to expose Elasticsearch as an external route
+ (rmeggins@redhat.com)
+- Parameterized Calico/Node Arguments (vincent.schwarzer@yahoo.de)
+- Fix auditConfig for non-HA environments (rteague@redhat.com)
+- Added Docker Registry Port 5000 to Firewalld (vincent.schwarzer@yahoo.de)
+- Added Calicoctl to deployment of Master Nodes (vincent.schwarzer@yahoo.de)
+- move etcd upgrade related code into etcd_upgrade role (jchaloup@redhat.com)
+- Localhost TMP Dir Fix (vincent.schwarzer@yahoo.de)
+- Adjusted Naming Schema of Calico Roles (vincent.schwarzer@yahoo.de)
+- Update hosts.*.example to include openshift_hosted_metrics_deployer_version
+ (pat2man@gmail.com)
+- Fix gpg key path in our repo (sdodson@redhat.com)
+- Uninstall: restart docker when container-engine restart hasn't changed.
+ (abutcher@redhat.com)
+- add etcd cluster size check (jvallejo@redhat.com)
+- fix etcd_container_version detection (jchaloup@redhat.com)
+- systemcontainercustom.conf.j2: use Environment instead of ENVIRONMENT
+ (gscrivan@redhat.com)
+- node, systemd: change Requires to Wants for openvswitch (gscrivan@redhat.com)
+- Add teams attribute to github identity provider (dms@redhat.com)
+- Don't escalate privileges in local tmpdir creation (skuznets@redhat.com)
+- Remove use of local_action with delegate_to and switch 'delegate_to:
+ localhost' temporary directory cleanup actions to local_actions.
+ (abutcher@redhat.com)
+- Rework openshift_excluders role (rteague@redhat.com)
+- Add regexp for container-engine lineinfile (smilner@redhat.com)
+- Default image policy on new clusters to on (ccoleman@redhat.com)
+- revert role-specific var name (jvallejo@redhat.com)
+- Filter non-strings from the oc_adm_ca_server_cert hostnames parameter.
+ (abutcher@redhat.com)
+- Don't set-up origin repositories if they've already been configured
+ (dms@redhat.com)
+- byo inventory versions 1.5 -> 3.6 (smilner@redhat.com)
+- byo inventory versions 3.5 -> 3.6 (smilner@redhat.com)
+- use dest instead of path for lineinfile (smilner@redhat.com)
+- openshift_version: skip rpm version==image version on Atomic
+ (gscrivan@redhat.com)
+- Add NO_PROXY workaround for container-engine atomic command
+ (smilner@redhat.com)
+- Add no_proxy to atomic.conf (smilner@redhat.com)
+- Include object validation in 3.6 upgrades (sdodson@redhat.com)
+- uninstall: handle container-engine (gscrivan@redhat.com)
+- Added Calico BGP Port 179 to Firewalld (vincent.schwarzer@yahoo.de)
+- Fixed for python3 with Fedora 25 Atomic (donny@fortnebula.com)
+- Add docker package for container-engine install (smilner@redhat.com)
+- Fix python3 error in repoquery (jpeeler@redhat.com)
+- check if hostname is in list of etcd hosts (jvallejo@redhat.com)
+- Fix templating of static service files (rteague@redhat.com)
+- Fix container image build references (pep@redhat.com)
+- Reset selinux context on /var/lib/origin/openshift.common.volumes
+ (sdodson@redhat.com)
+- Adding assert to check for python-passlib on control host
+ (ewolinet@redhat.com)
+- Update variable name to standard (rhcarvalho@gmail.com)
+- Make class attribute name shorter (rhcarvalho@gmail.com)
+- Add module docstring (rhcarvalho@gmail.com)
+- Update check (rhcarvalho@gmail.com)
+- Change based on feedback (vincent.schwarzer@yahoo.de)
+- Removed Hardcoded Calico URLs (vincent.schwarzer@yahoo.de)
+- int -> float (rhcarvalho@gmail.com)
+- Remove vim line (rhcarvalho@gmail.com)
+- add etcd volume check (jvallejo@redhat.com)
+- Added additional Calico Network Plugin Checks (vincent.schwarzer@yahoo.de)
+- Ensure good return code for specific until loops (smilner@redhat.com)
+- add template service broker configurable (jminter@redhat.com)
+- Prevent line wrap in yaml dump of IDP, fixes #3912 (rikkuness@gmail.com)
+
+* Sat May 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.68-1
+- Updating registry-console image version during a post_control_plane upgrade
+ (ewolinet@redhat.com)
+- Remove userland-proxy-path from daemon.json (smilner@redhat.com)
+- Fix whitespace issues in custom template (smilner@redhat.com)
+- Always add proxy items to atomic.conf (smilner@redhat.com)
+- Move container-engine systemd environment to updated location
+ (smilner@redhat.com)
+- doc: Add link to daemon.json upstream doc (smilner@redhat.com)
+- Remove unused daemon.json keys (smilner@redhat.com)
+- bug 1448860. Change recovery_after_nodes to match node_quorum
+ (jcantril@redhat.com)
+- bug 1441369. Kibana memory limits bug 1439451. Kibana crash
+ (jcantril@redhat.com)
+- Extend repoquery command (of lib_utils role) to ignore excluders
+ (jchaloup@redhat.com)
+- lower case in /etc/daemon.json and correct block-registry (ghuang@redhat.com)
+- Fix for yedit custom separators (mwoodson@redhat.com)
+- Updating 3.6 enterprise registry-console template image version
+ (ewolinet@redhat.com)
+- Default to iptables on master (sdodson@redhat.com)
+- Rename blocked-registries to block-registries (smilner@redhat.com)
+- Ensure true is lowercase in daemon.json (smilner@redhat.com)
+- use docker_log_driver and /etc/docker/daemon.json to determine log driver
+ (rmeggins@redhat.com)
+- Temporarily revert to OSEv3 host group usage (rteague@redhat.com)
+- Add service file templates for master and node (smilner@redhat.com)
+- Update systemd units to use proper container service name
+ (smilner@redhat.com)
+- polish etcd_common role (jchaloup@redhat.com)
+- Note existence of Fedora tests and how to rerun (rhcarvalho@gmail.com)
+- Fix for OpenShift SDN Check (vincent.schwarzer@yahoo.de)
+- Updating oc_obj to use get instead of getattr (ewolinet@redhat.com)
+- Updating size suffix for metrics in role (ewolinet@redhat.com)
+- GlusterFS: Allow swapping an existing registry's backend storage
+ (jarrpa@redhat.com)
+- GlusterFS: Allow for a separate registry-specific playbook
+ (jarrpa@redhat.com)
+- GlusterFS: Improve role documentation (jarrpa@redhat.com)
+- hosted_registry: Get correct pod selector for GlusterFS storage
+ (jarrpa@redhat.com)
+- hosted registry: Fix typo (jarrpa@redhat.com)
+- run excluders over selected set of hosts during control_plane/node upgrade
+ (jchaloup@redhat.com)
+- Reserve kubernetes and 'kubernetes-' prefixed namespaces
+ (jliggitt@redhat.com)
+- oc_volume: Add missing parameter documentation (jarrpa@redhat.com)
+
+* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.67-1
+- byo: correct option name (gscrivan@redhat.com)
+- Fail if rpm version != docker image version (jchaloup@redhat.com)
+- Perform package upgrades in one transaction (sdodson@redhat.com)
+- Properly fail if OpenShift RPM version is undefined (rteague@redhat.com)
+
+* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.66-1
+- Fix issue with Travis-CI using old pip version (rteague@redhat.com)
+- Remove vim configuration from Python files (rhcarvalho@gmail.com)
+- Use local variables for daemon.json template (smilner@redhat.com)
+- Fix additional master cert & client config creation. (abutcher@redhat.com)
+
+* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
+-
+
+* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
+-
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
+-
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
+- Updating logging and metrics to restart api, ha and controllers when updating
+ master config (ewolinet@redhat.com)
+- Adding defaults for es_indices (ewolinet@redhat.com)
+- Updating logic for generating pvcs and their counts to prevent reuse when
+ looping (ewolinet@redhat.com)
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.58-1
+- Moving Dockerfile content to images dir (jupierce@redhat.com)
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
+-
+
+* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
+-
+
+* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
+- Fix 1448368, and some other minor issues (ghuang@redhat.com)
+- mux startup is broken without this fix (rmeggins@redhat.com)
+- Dockerfile: create symlink for /opt/app-root/src (gscrivan@redhat.com)
+- docs: Add basic system container dev docs (smilner@redhat.com)
+- installer: Add system container variable for log saving (smilner@redhat.com)
+- installer: support running as a system container (gscrivan@redhat.com)
+
+* Fri May 05 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.54-1
+- Allow oc_ modules to pass unicode results (rteague@redhat.com)
+- Ensure repo cache is clean on the first run (rteague@redhat.com)
+- move etcdctl.yml from etcd to etcd_common role (jchaloup@redhat.com)
+- Modified pick from release-1.5 for updating hawkular htpasswd generation
+ (ewolinet@redhat.com)
+
+* Thu May 04 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.53-1
+- Correctly setting the primary and replica shard count settings
+ (ewolinet@redhat.com)
+- System container docker (smilner@redhat.com)
+- Stop logging AWS credentials in master role. (dgoodwin@redhat.com)
+- Remove set operations from openshift_master_certificates iteration.
+ (abutcher@redhat.com)
+- Refactor system fact gathering to avoid dictionary size change during
+ iteration. (abutcher@redhat.com)
+- Refactor secret generation for python3. (abutcher@redhat.com)
+- redhat-ci: use requirements.txt (jlebon@redhat.com)
+
+* Wed May 03 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.52-1
+- Making mux with_items list evaluate as empty if it didn't get objects before
+ (ewolinet@redhat.com)
+- etcd Upgrade Refactor (rteague@redhat.com)
+- v3.3 Upgrade Refactor (rteague@redhat.com)
+- v3.4 Upgrade Refactor (rteague@redhat.com)
+- v3.5 Upgrade Refactor (rteague@redhat.com)
+- v3.6 Upgrade Refactor (rteague@redhat.com)
+- Fix variants for v3.6 (rteague@redhat.com)
+- Normalizing groups. (kwoodson@redhat.com)
+- Use openshift_ca_host's hostnames to sign the CA (sdodson@redhat.com)
+
+* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.51-1
+- Remove std_include from playbooks/byo/rhel_subscribe.yml
+ (abutcher@redhat.com)
+- Adding way to add labels and nodeselectors to logging project
+ (ewolinet@redhat.com)
+
+* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.50-1
+- Don't double quote when conditions (sdodson@redhat.com)
+- Remove jinja template delimiters from when conditions (sdodson@redhat.com)
+- move excluder upgrade validation tasks under openshift_excluder role
+ (jchaloup@redhat.com)
+- Fix test compatibility with OpenSSL 1.1.0 (pierre-
+ louis.bonicoli@libregerbil.fr)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.49-1
+- Warn users about conflicts with docker0 CIDR range (lpsantil@gmail.com)
+- Bump ansible rpm dependency to 2.2.2.0 (sdodson@redhat.com)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.48-1
+-
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.47-1
+-
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.46-1
+- Contrib: Hook to verify modules match assembled fragments
+ (tbielawa@redhat.com)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.45-1
+-
+
+* Sun Apr 30 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.44-1
+- Refactor etcd roles (jchaloup@redhat.com)
+
+* Sat Apr 29 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.43-1
+- Document the Pull Request process (rhcarvalho@gmail.com)
+- Add Table of Contents (rhcarvalho@gmail.com)
+- Improve Contribution Guide (rhcarvalho@gmail.com)
+- Replace absolute with relative URLs (rhcarvalho@gmail.com)
+- Move repo structure to a separate document (rhcarvalho@gmail.com)
+- Remove outdated information about PRs (rhcarvalho@gmail.com)
+- Move link to BUILD.md to README.md (rhcarvalho@gmail.com)
+- Adding checks for starting mux for 2.2.0 (ewolinet@redhat.com)
+- Fix OpenShift registry deployment on OSE 3.2 (lhuard@amadeus.com)
+
+* Fri Apr 28 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.42-1
+- Fix certificate check Job examples (pep@redhat.com)
+- Add python-boto requirement (pep@redhat.com)
+
+* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.41-1
+- Add bool for proper conditional handling (rteague@redhat.com)
+
+* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.40-1
+- Fix cluster creation with `bin/cluster` when there’s no glusterfs node
+ (lhuard@amadeus.com)
+
+* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.39-1
+- Move container build instructions to BUILD.md (pep@redhat.com)
+- Elaborate container image usage instructions (pep@redhat.com)
+
+* Wed Apr 26 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.38-1
+- .redhat-ci.yml: also publish journal logs (jlebon@redhat.com)
+- Standardize all Origin versioning on 3.6 (rteague@redhat.com)
+- integration tests: add CI scripts (lmeyer@redhat.com)
+- preflight int tests: define image builds to support tests (lmeyer@redhat.com)
+- preflight int tests: generalize; add tests (lmeyer@redhat.com)
+- Add stub of preflight integration tests (rhcarvalho@gmail.com)
+- Move Python unit tests to subdirectory (rhcarvalho@gmail.com)
+- Revert "Add /etc/sysconfig/etcd to etcd_container" (sdodson@redhat.com)
+- Replace original router cert variable names. (abutcher@redhat.com)
+- oc_obj: Allow for multiple kinds in delete (jarrpa@redhat.com)
+- Update v1.5 content (sdodson@redhat.com)
+- Update v1.6 content (sdodson@redhat.com)
+- Make the rhel_subscribe role subscribe to OSE 3.5 channel by default
+ (lhuard@amadeus.com)
+- Addressing yamllint (ewolinet@redhat.com)
+- Updating kibana-proxy secret key for server-tls entry (ewolinet@redhat.com)
+- Pick from issue3896 (ewolinet@redhat.com)
+- Cleanup comments and remove extraneous tasks (sdodson@redhat.com)
+- Store backups in /var/lib/etcd/openshift-backup (sdodson@redhat.com)
+- Create member/snap directory in case it doesn't exist (sdodson@redhat.com)
+- Copy v3 data dir when performing backup (sdodson@redhat.com)
+
+* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.37-1
+- Differentiate between service serving router certificate and custom
+ openshift_hosted_router_certificate when replacing the router certificate.
+ (abutcher@redhat.com)
+
+* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.36-1
+- Update swap disable tasks (rteague@redhat.com)
+- Removing resource version to remove object conflicts caused by race
+ conditions. (kwoodson@redhat.com)
+- cast openshift_logging_use_mux_client to bool (rmeggins@redhat.com)
+- mux does not require privileged, only hostmount-anyuid (rmeggins@redhat.com)
+- Switched Heapster to use certificates generated by OpenShift
+ (juraci@kroehling.de)
+- Use metrics and logging deployer tag v3.4 for enterprise (sdodson@redhat.com)
+- Remove v1.5 and v1.6 metrics/logging templates (sdodson@redhat.com)
+
+* Sun Apr 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.35-1
+-
+
+* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.34-1
+- GlusterFS: provide default for groups.oo_glusterfs_to_config in with_items
+ (jarrpa@redhat.com)
+
+* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.33-1
+- Adding module calls instead of command for idempotency. (kwoodson@redhat.com)
+- Use return_value when value is constant (pierre-
+ louis.bonicoli@libregerbil.fr)
+- Add missing mock for locate_oc_binary method (pierre-
+ louis.bonicoli@libregerbil.fr)
+
+* Fri Apr 21 2017 Scott Dodson <sdodson@redhat.com> 3.6.32-1
+- Don't check excluder versions when they're not enabled (sdodson@redhat.com)
+
+* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.31-1
+- Stop all services prior to upgrading, start all services after
+ (sdodson@redhat.com)
+
+* Thu Apr 20 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.30-1
+- Add Ansible syntax checks to tox (rteague@redhat.com)
+- Add /etc/sysconfig/etcd to etcd_container (me@fale.io)
+- openshift_version: improve messaging (lmeyer@redhat.com)
+- Simplify memory availability check, review tests (rhcarvalho@gmail.com)
+- Simplify mixin class (rhcarvalho@gmail.com)
+- Simplify disk availability check, review tests (rhcarvalho@gmail.com)
+- add disk and memory availability check tests (jvallejo@redhat.com)
+- add ram and storage preflight check (jvallejo@redhat.com)
+- Fix paths for file includes (rteague@redhat.com)
+- Fix instantiation of action plugin in test fixture (rhcarvalho@gmail.com)
+- Introduce Elasticsearch readiness probe (lukas.vlcek@gmail.com)
+- added an empty file to the contiv empty dir. This allows contiv to be vendored
+ in git (mwoodson@redhat.com)
+
+* Wed Apr 19 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.29-1
+- Create openshift-metrics entrypoint playbook (rteague@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.28-1
+- Minor v3.6 upgrade docs fixes (rteague@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.27-1
+- repo: start testing PRs on Fedora Atomic Host (jlebon@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.26-1
+- Correct role dependencies (rteague@redhat.com)
+- Allow for GlusterFS to provide registry storage (jarrpa@redhat.com)
+- Integrate GlusterFS into OpenShift installation (jarrpa@redhat.com)
+- GlusterFS playbook and role (jarrpa@redhat.com)
+
+* Mon Apr 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.25-1
+- Fix default image tag for enterprise (sdodson@redhat.com)
+- Cast etcd_debug to a boolean (skuznets@redhat.com)
+
+* Fri Apr 14 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.24-1
+- tox tests: pin test requirement versions (lmeyer@redhat.com)
+- This is no longer a widely encountered issue (sdodson@redhat.com)
+- Standardize use of byo and common for network_manager.yml
+ (rteague@redhat.com)
+- Disable swap space on nodes at install and upgrade (rteague@redhat.com)
+- Do not check package version on non-master/node (rhcarvalho@gmail.com)
+
+* Thu Apr 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.23-1
+- Refactor initialize groups tasks (rteague@redhat.com)
+- tox tests: pin test requirement versions (lmeyer@redhat.com)
+- skip PackageAvailability check if not yum (jvallejo@redhat.com)
+- Document service_type for openshift-enterprise (rhcarvalho@gmail.com)
+- Remove references to outdated deployment_type (rhcarvalho@gmail.com)
+- Update deployment_type documentation (rhcarvalho@gmail.com)
+- Document merge time trends page (rhcarvalho@gmail.com)
+- Remove outdated documentation (rhcarvalho@gmail.com)
+- Remove outdated build instructions (rhcarvalho@gmail.com)
+- openshift_sanitize_inventory: disallow conflicting deployment types
+ (lmeyer@redhat.com)
+- Refactor docker upgrade playbooks (rteague@redhat.com)
+- Changed Hawkular Metrics secrets to use a format similar to the one
+ automatically generated by OpenShift (juraci@kroehling.de)
+
+* Wed Apr 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.22-1
+- Fixed spelling mistake. (kwoodson@redhat.com)
+- Remove unnecessary folder refs (rteague@redhat.com)
+- Switching commands for modules during upgrade of router and registry.
+ (kwoodson@redhat.com)
+- Fixing a compatibility issue with python 2.7 to 3.5 when reading from
+ subprocess. (kwoodson@redhat.com)
+- Refactor use of initialize_oo_option_facts.yml (rteague@redhat.com)
+- preflight checks: refactor and fix aos_version (lmeyer@redhat.com)
+- Add external provisioners playbook starting with aws efs (mawong@redhat.com)
+
+* Tue Apr 11 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.21-1
+- Adding a query for the existing docker-registry route. (kwoodson@redhat.com)
+- Removing docker-registry route from cockpit-ui. (kwoodson@redhat.com)
+
+* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.20-1
+- Fixed a bug when oc command fails. (kwoodson@redhat.com)
+- openshift_sanitize_inventory: validate release (lmeyer@redhat.com)
+
+* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.19-1
+- Add example scheduled certificate check (pep@redhat.com)
+- Switch from ignoring to passing on checks (rteague@redhat.com)
+- Add tests for action plugin (rhcarvalho@gmail.com)
+- Remove unnecessary code (rhcarvalho@gmail.com)
+- Make resolve_checks more strict (rhcarvalho@gmail.com)
+
+* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.18-1
+- master-api: add mount for /var/log (gscrivan@redhat.com)
+- master: add mount for /var/log (gscrivan@redhat.com)
+- unexclude excluder if it is to be upgraded and already installed
+ (jchaloup@redhat.com)
+- Bump calico policy controller (djosborne10@gmail.com)
+- Fixed a string format and a lint space issue (kwoodson@redhat.com)
+- Fixed name and selector to be mutually exclusive (kwoodson@redhat.com)
+- Adding ability to delete by selector. (kwoodson@redhat.com)
+- Adding delete with selector support. (kwoodson@redhat.com)
+
+* Thu Apr 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.17-1
+- Adding signed router cert and fixing server_cert bug. (kwoodson@redhat.com)
+
+* Wed Apr 05 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.16-1
+- Removing test coverage for shared code. (kwoodson@redhat.com)
+- Port 10255 unnecessary. Removing all instances (ccallega@redhat.com)
+- oo_filters: Disable pylint too-many-lines test (jarrpa@redhat.com)
+- oo_collect: Allow list elements to be lists of dict (jarrpa@redhat.com)
+- oc_label: handle case where _get() returns no results (jarrpa@redhat.com)
+- Addressing py27-yamllint (esauer@redhat.com)
+- Add 'docker-registry.default.svc' to cert-redeploy too (sdodson@redhat.com)
+- Support unicode output when dumping yaml (rteague@redhat.com)
+- Add docker-registry.default.svc short name to registry service signing
+ (sdodson@redhat.com)
+- oc_configmap: Add missing check for name (jarrpa@redhat.com)
+- oo_collect: Update comments to show source of failure (jarrpa@redhat.com)
+- openshift_facts: Allow examples_content_version to be set to v1.6
+ (jarrpa@redhat.com)
+- Restart polkitd to workaround a bug in polkitd (sdodson@redhat.com)
+- Add names to openshift_image_tag asserts (smilner@redhat.com)
+- doc: Remove atomic-openshift deployment type (smilner@redhat.com)
+- openshift_version now requires prepended version formats (smilner@redhat.com)
+- Warn if openshift_image_tag is defined by hand for package installs
+ (smilner@redhat.com)
+- Verify openshift_image_tag is valid during openshift_version main
+ (smilner@redhat.com)
+- Add openshift_version fact fallback debug messages (smilner@redhat.com)
+- cleanup: when in openshift_version tasks are multiline (smilner@redhat.com)
+- Compatibility updates to openshift_logging role for ansible 2.2.2.0+
+ (esauer@redhat.com)
+
+* Tue Apr 04 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.15-1
+- Document etcd_ca_default_days in example inventories. (abutcher@redhat.com)
+- Fixed a bug: Ansible requires a msg param when calling module.fail_json.
+ (kwoodson@redhat.com)
+
* Sat Apr 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.14-1
- Update v1.5 content (sdodson@redhat.com)
- Add v1.6 content (sdodson@redhat.com)
diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml
index 91948c72e..eec6c23a7 100644
--- a/playbooks/adhoc/contiv/delete_contiv.yml
+++ b/playbooks/adhoc/contiv/delete_contiv.yml
@@ -1,5 +1,5 @@
---
-- name: delete contiv
+- name: Uninstall contiv
hosts: all
gather_facts: False
tasks:
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
index 81c1ee653..64f861c6a 100644
--- a/playbooks/adhoc/create_pv/create_pv.yaml
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -20,7 +20,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_volume_size
- cli_device_name
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index f638fab83..507ac0f05 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -33,7 +33,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
- cli_volume_size
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index d988a28b0..3059d3dc5 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -24,7 +24,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_docker_device
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
index b6dde357e..5e12cd181 100644
--- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -25,7 +25,7 @@
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
index daff68fbe..cacd0b0f3 100644
--- a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-ansible
'''
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 598f1966d..eb8440d1b 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -42,7 +42,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
- cli_volume_size
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ffdcd0ce1..58b3a7835 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -26,6 +26,20 @@
- hosts: nodes
become: yes
tasks:
+ - name: Remove dnsmasq dispatcher
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/dnsmasq.d/origin-dns.conf
+ - /etc/dnsmasq.d/origin-upstream-dns.conf
+ - /etc/dnsmasq.d/openshift-ansible.conf
+ - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+ when: openshift_use_dnsmasq | default(true) | bool
+ - service:
+ name: NetworkManager
+ state: restarted
+ when: openshift_use_dnsmasq | default(true) | bool
- name: Stop services
service: name={{ item }} state=stopped
with_items:
@@ -103,7 +117,7 @@
- atomic-openshift-sdn-ovs
- cockpit-bridge
- cockpit-docker
- - cockpit-shell
+ - cockpit-system
- cockpit-ws
- kubernetes-client
- openshift
@@ -125,7 +139,7 @@
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
- when: "{{ not is_atomic | bool }}"
+ when: not is_atomic | bool
- shell: systemctl reset-failed
changed_when: False
@@ -146,7 +160,7 @@
- lbr0
- vlinuxbr
- vovsbr
- when: "{{ openshift_remove_all | default(true) | bool }}"
+ when: openshift_remove_all | default(true) | bool
- shell: atomic uninstall "{{ item }}"-master-api
changed_when: False
@@ -239,7 +253,7 @@
changed_when: False
failed_when: False
with_items: "{{ images_to_delete.results }}"
- when: "{{ openshift_uninstall_images | default(True) | bool }}"
+ when: openshift_uninstall_images | default(True) | bool
- name: remove sdn drop files
file:
@@ -252,7 +266,7 @@
- /etc/sysconfig/openshift-node
- /etc/sysconfig/openvswitch
- /run/openshift-sdn
- when: "{{ openshift_remove_all | default(True) | bool }}"
+ when: openshift_remove_all | default(True) | bool
- find: path={{ item }} file_type=file
register: files
@@ -279,9 +293,6 @@
with_items:
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
- - /etc/dnsmasq.d/origin-dns.conf
- - /etc/dnsmasq.d/origin-upstream-dns.conf
- - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- /etc/openshift
- /etc/openshift-sdn
- /etc/sysconfig/atomic-enterprise-node
@@ -305,11 +316,19 @@
- shell: systemctl daemon-reload
changed_when: False
+ - name: restart container-engine
+ service: name=container-engine state=restarted
+ failed_when: false
+ register: container_engine
+
- name: restart docker
service: name=docker state=restarted
-
- - name: restart NetworkManager
- service: name=NetworkManager state=restarted
+ failed_when: false
+ when: not (container_engine | changed)
+ register: l_docker_restart_docker_in_pb_result
+ until: not l_docker_restart_docker_in_pb_result | failed
+ retries: 3
+ delay: 30
- hosts: masters
become: yes
@@ -339,7 +358,7 @@
- atomic-openshift-master
- cockpit-bridge
- cockpit-docker
- - cockpit-shell
+ - cockpit-system
- cockpit-ws
- corosync
- kubernetes-client
@@ -386,10 +405,19 @@
- "{{ directories.results | default([]) }}"
- files
+ - set_fact:
+ client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
+
+ - name: Remove client kubeconfigs
+ file:
+ path: "~{{ item }}/.kube"
+ state: absent
+ with_items:
+ - "{{ client_users }}"
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
- - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
- /etc/corosync
@@ -414,7 +442,6 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- - /root/.kube
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/openshift
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index fbaf81dec..119df9c7d 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 8d64b0521..821a0f30e 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -35,4 +35,3 @@
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
- openshift_use_dnsmasq: false
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml
new file mode 100644
index 000000000..0e8e7a94d
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/config.yml
@@ -0,0 +1,8 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-cfme/config.yml
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..c8ed16859
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/uninstall.yml
@@ -0,0 +1,6 @@
+---
+# - include: ../openshift-cluster/initialize_groups.yml
+# tags:
+# - always
+
+- include: ../../common/openshift-cfme/uninstall.yml
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md
new file mode 100644
index 000000000..f0f14b268
--- /dev/null
+++ b/playbooks/byo/openshift-checks/README.md
@@ -0,0 +1,66 @@
+# OpenShift health checks
+
+This directory contains Ansible playbooks for detecting potential problems prior
+to an install, as well as health checks to run on existing OpenShift clusters.
+
+Ansible's default operation mode is to fail fast, on the first error. However,
+when performing checks, it is useful to gather as much information about
+problems as possible in a single run.
+
+Thus, the playbooks run a battery of checks against the inventory hosts and have
+Ansible gather intermediate errors, giving a more complete diagnostic of the
+state of each host. If any check fails, the playbook run is marked as
+failed.
+
+To facilitate understanding the problems that were encountered, a custom
+callback plugin summarizes execution errors at the end of a playbook run.
+
+# Available playbooks
+
+1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
+ requirements and looks for common problems that can prevent a successful
+ installation of a production cluster.
+
+2. Diagnostic playbook ([health.yml](health.yml)) - checks an existing cluster
+ for known signs of problems.
+
+3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
+ check that certificates in use are valid and not expiring soon.
+
+## Running
+
+With a [recent installation of Ansible](../../../README.md#setup), run the playbook
+against your inventory file. The steps are:
+
+1. If you haven't done it yet, clone this repository:
+
+ ```console
+ $ git clone https://github.com/openshift/openshift-ansible
+ $ cd openshift-ansible
+ ```
+
+2. Install the [dependencies](../../../README.md#setup)
+
+3. Run the appropriate playbook:
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+ ```
+
+ or
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+ ```
+
+ or
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+ ```
+
+## Running in a container
+
+This repository is built into a Docker image that includes Ansible, so the checks
+can be run anywhere Docker is available without manually installing dependencies.
+Instructions for doing so may be found [in the README](../../../README_CONTAINER_IMAGE.md).
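+
+As a rough illustration, a containerized run might look like the following. The
+image name, mount paths, and environment variables here are assumptions for the
+sake of example; see the linked README for the exact invocation the image supports:
+
+```console
+$ docker run -u "$(id -u)" \
+    -v "$HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z" \
+    -v "$PWD/inventory:/tmp/inventory:Z" \
+    -e INVENTORY_FILE=/tmp/inventory \
+    -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/pre-install.yml \
+    openshift/origin-ansible
+```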
diff --git a/playbooks/certificate_expiry/default.yaml b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
index 630135cae..630135cae 100644
--- a/playbooks/certificate_expiry/default.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
new file mode 100644
index 000000000..378d1f154
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
@@ -0,0 +1,40 @@
+# This example generates HTML and JSON reports.
+#
+# Copies of the generated HTML and JSON reports are uploaded to the masters,
+# which is particularly useful when this playbook is run from a container.
+#
+# All certificates (healthy or not) are included in the results
+#
+# Optional environment variables to alter the behaviour of the playbook:
+# CERT_EXPIRY_WARN_DAYS: length of the warning window in days (default: 45)
+# COPY_TO_PATH: path on the masters to copy the reports to (default: /etc/origin/certificate_expiration_report)
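+#
+# Hypothetical example run (inventory path and values are illustrative only):
+#   CERT_EXPIRY_WARN_DAYS=30 COPY_TO_PATH=/etc/origin/certificate_expiration_report \
+#   ansible-playbook -i <inventory file> \
+#     playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml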
+---
+- name: Generate certificate expiration reports
+ hosts: nodes:masters:etcd
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
+ roles:
+ - role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Ensure that the target directory exists
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
diff --git a/playbooks/certificate_expiry/easy-mode.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
index ae41c7c14..ae41c7c14 100644
--- a/playbooks/certificate_expiry/easy-mode.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
diff --git a/playbooks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
index d80cb6ff4..d80cb6ff4 100644
--- a/playbooks/certificate_expiry/html_and_json_default_paths.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
new file mode 100644
index 000000000..2189455b7
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
@@ -0,0 +1,16 @@
+---
+# Generate timestamped HTML and JSON reports in /var/lib/certcheck
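+# A typical invocation (the inventory path is illustrative):
+#   ansible-playbook -i <inventory file> \
+#     playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml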
+
+- name: Check certificate expiry
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_show_all: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
index 87a0f3be4..87a0f3be4 100644
--- a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
diff --git a/playbooks/certificate_expiry/longer_warning_period.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
index 960457c4b..960457c4b 100644
--- a/playbooks/certificate_expiry/longer_warning_period.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/roles
@@ -0,0 +1 @@
+../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
new file mode 100644
index 000000000..dfc1a7db0
--- /dev/null
+++ b/playbooks/byo/openshift-checks/health.yml
@@ -0,0 +1,3 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..5e8c3ab9b
--- /dev/null
+++ b/playbooks/byo/openshift-checks/pre-install.yml
@@ -0,0 +1,3 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index cb464cf0d..0db7ccf89 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -13,7 +13,12 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
+
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
| union(g_lb_hosts) | union(g_nfs_hosts)
| union(g_new_node_hosts)| union(g_new_master_hosts)
+ | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts)
| default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 4db0720d0..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 32f9ebfd3..9ce8f0d3c 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,26 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: initialize_groups.yml
- include: ../../common/openshift-cluster/enable_dnsmasq.yml
diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml
new file mode 100644
index 000000000..2a725510a
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/initialize_groups.yml
@@ -0,0 +1,10 @@
+---
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index f8eebe898..76f165c6d 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -4,29 +4,7 @@
# Hosted logging on. See inventory/byo/hosts.*.example for the
# currently supported method.
#
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: initialize_groups.yml
- include: ../../common/openshift-cluster/openshift_logging.yml
vars:
diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml
new file mode 100644
index 000000000..5ad3a1a01
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml
@@ -0,0 +1,4 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/openshift_metrics.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index ad24b9ad0..a3894e243 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
@@ -16,5 +20,7 @@
- include: ../../common/openshift-node/restart.yml
- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
+ when: openshift_hosted_manage_router | default(true) | bool
- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
+ when: openshift_hosted_manage_registry | default(true) | bool
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
new file mode 100644
index 000000000..29f821eda
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
@@ -0,0 +1,10 @@
+---
+- include: initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
index ee49364fa..8516baee8 100644
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
index 9c8248c4e..566e8b261 100644
--- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
index 1695111d0..42777e5e6 100644
--- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
index e44e95467..6e11a111b 100644
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
@@ -1,6 +1,10 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
index 53ee68db9..30feabab3 100644
--- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
index f8c267569..2630fb234 100644
--- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
new file mode 100644
index 000000000..a9fc18958
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -0,0 +1,12 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing the
+# service catalog. See inventory/byo/hosts.*.example for the currently
+# supported method.
+#
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/service_catalog.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0425ba518..0f64f40f3 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,6 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift origin from 1.4.x to 1.5.x)
-- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift origin from 1.3.x to 1.4.x)
+- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
+- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
+- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 120000
index d5d864b63..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../../common/openshift-cluster/upgrades/files/nuke_images.sh \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 5feb33be4..7f31e26e1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,36 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../../initialize_groups.yml
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
-
-- include: docker_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
index 106dcc12d..5bd5d64ab 100644
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -1,26 +1,6 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../initialize_groups.yml
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../cluster_hosts.yml
+- include: ../../../common/openshift-cluster/evaluate_groups.yml
- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index d268850d8..697a18c4d 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -2,106 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index d11e51640..4d284c279 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 5a0f143ac..180a2821f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -4,103 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 25d8cd2ba..8cce91b3f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -2,104 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index d52f3c111..8e5d0f5f9 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 07c734a40..d5329b858 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
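
Taken together, the two `+` lines in this file give the new shape of every byo upgrade wrapper: evaluate the host groups, then delegate to the common playbook. A sketch of the assembled result, using only paths visible in this diff (comments are illustrative):

```yaml
---
# playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml (sketch)
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
# Create the initial host groups (l_oo_all_hosts) from the inventory.
- include: ../../initialize_groups.yml

# All pre_upgrade checks and the node upgrade itself now live in the common tree.
- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
```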
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index 86f5a36ca..f44d55ad2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -2,110 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
-# So it is necessary to run the play after running disable_excluder.yml.
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index a2f1cd2b1..2377713fa 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -11,105 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index f858de3d5..5b3f6ab06 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
index 930cc753c..797af671a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
@@ -1,11 +1,10 @@
-# v3.5 Major and Minor Upgrade Playbook
+# v3.6 Major and Minor Upgrade Playbook
## Overview
-This playbook currently performs the
-following steps.
+This playbook currently performs the following steps.
* Upgrade and restart master services
- * Unschedule node.
+ * Unschedule node
* Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
@@ -15,4 +14,7 @@ following steps.
* Updates image streams and quickstarts
## Usage
+
+```
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index 900bbc8d8..40120b3e8 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,110 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
-# So it is necessary to run the play after running disable_excluder.yml.
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 5bd0f7ac5..408a4c631 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,105 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 96d89dbdd..b5f42b804 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml
new file mode 100644
index 000000000..dd3f47a4d
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/config.yml
@@ -0,0 +1,14 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-etcd/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
+ openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..143016159
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -0,0 +1,8 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-etcd/migrate.yml
+ tags:
+ - always
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
index 19403116d..d43533641 100644
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ b/playbooks/byo/openshift-etcd/restart.yml
@@ -1,4 +1,8 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md
new file mode 100644
index 000000000..f62aea229
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/README.md
@@ -0,0 +1,98 @@
+# OpenShift GlusterFS Playbooks
+
+These playbooks are intended to enable the use of GlusterFS volumes by pods in
+OpenShift. While they try to provide a sane set of defaults, they cover a
+variety of scenarios and configurations, so read carefully. :)
+
+## Playbook: config.yml
+
+This is the main playbook that integrates GlusterFS into a new or existing
+OpenShift cluster. It will also, if specified, configure a hosted Docker
+registry with GlusterFS backend storage.
+
+This playbook requires the `glusterfs` group to exist in the Ansible inventory
+file. The hosts in this group are the nodes of the GlusterFS cluster.
+
+ * If this is a newly configured cluster, each host must have a
+ `glusterfs_devices` variable defined, and each such variable must be a list
+ of block storage devices intended for use only by the GlusterFS cluster. If
+ this is also an external GlusterFS cluster, you must specify
+ `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+ managed by an external heketi service, you must also specify
+ `openshift_storage_glusterfs_heketi_is_native=False` and
+ `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+ service. All of these variables are specified in `[OSEv3:vars]`.
+ * If this is an existing cluster, you do not need to specify a list of block
+ devices, but you must specify the following variables in `[OSEv3:vars]`:
+ * `openshift_storage_glusterfs_is_missing=False`
+ * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated in the same
+way the `glusterfs` group is, using the nodes of the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents from the current hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes, an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted Docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.
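
The note above about Endpoints and Service resources is easiest to picture with a concrete pair of objects. This is an illustration only, not something the playbooks emit verbatim; the name and addresses below are placeholders:

```yaml
# One Endpoints/Service pair per namespace that mounts GlusterFS volumes
# directly (not needed where a StorageClass provisions them automatically).
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster        # placeholder name
subsets:
- addresses:
  - ip: 192.0.2.10               # IP of node0
  - ip: 192.0.2.11               # IP of node1
  - ip: 192.0.2.12               # IP of node2
  ports:
  - port: 1                      # dummy port; GlusterFS does not use it
---
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-cluster        # must match the Endpoints name
spec:
  ports:
  - port: 1
```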
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..3f11f3991
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/config.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-glusterfs/config.yml
diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..6ee6febdb
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/registry.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-glusterfs/registry.yml
diff --git a/playbooks/byo/openshift-preflight/roles b/playbooks/byo/openshift-glusterfs/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-preflight/roles
+++ b/playbooks/byo/openshift-glusterfs/roles
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 21e4cff1b..7988863f3 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,4 +1,8 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index a5705e990..8aa07a664 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,27 +1,5 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: ../openshift-cluster/initialize_groups.yml
- include: ../../common/openshift-master/scaleup.yml
vars:
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
index 9bb3ea17f..b23692237 100644
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -1,42 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../openshift-cluster/initialize_groups.yml
-- name: Install and configure NetworkManager
- hosts: l_oo_all_hosts
- become: yes
- tasks:
- - name: install NetworkManager
- package:
- name: 'NetworkManager'
- state: present
-
- - name: configure NetworkManager
- lineinfile:
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
- regexp: '^{{ item }}='
- line: '{{ item }}=yes'
- state: present
- create: yes
- with_items:
- - 'USE_PEERDNS'
- - 'NM_CONTROLLED'
-
- - name: enable and start NetworkManager
- service:
- name: 'NetworkManager'
- state: started
- enabled: yes
+- include: ../../common/openshift-node/network_manager.yml
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
index 6861625b9..92665d71d 100644
--- a/playbooks/byo/openshift-node/restart.yml
+++ b/playbooks/byo/openshift-node/restart.yml
@@ -1,4 +1,8 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 88d236b53..c6965fd6f 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -1,27 +1,5 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: ../openshift-cluster/initialize_groups.yml
- include: ../../common/openshift-node/scaleup.yml
vars:
diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md
deleted file mode 100644
index b50292eac..000000000
--- a/playbooks/byo/openshift-preflight/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# OpenShift preflight checks
-
-Here we provide an Ansible playbook for detecting potential roadblocks prior to
-an install or upgrade.
-
-Ansible's default operation mode is to fail fast, on the first error. However,
-when performing checks, it is useful to gather as much information about
-problems as possible in a single run.
-
-The `check.yml` playbook runs a battery of checks against the inventory hosts
-and tells Ansible to ignore intermediate errors, thus giving a more complete
-diagnostic of the state of each host. Still, if any check failed, the playbook
-run will be marked as having failed.
-
-To facilitate understanding the problems that were encountered, we provide a
-custom callback plugin to summarize execution errors at the end of a playbook
-run.
-
----
-
-*Note that currently the `check.yml` playbook is only useful for RPM-based
-installations. Containerized installs are excluded from checks for now, but
-might be included in the future if there is demand for that.*
-
----
-
-## Running
-
-With an installation of Ansible 2.2 or greater, run the playbook directly
-against your inventory file. Here is the step-by-step:
-
-1. If you haven't done it yet, clone this repository:
-
- ```console
- $ git clone https://github.com/openshift/openshift-ansible
- $ cd openshift-ansible
- ```
-
-2. Run the playbook:
-
- ```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml
- ```
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
index c5f05d0f0..2e53452a6 100644
--- a/playbooks/byo/openshift-preflight/check.yml
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -1,12 +1,3 @@
---
-- hosts: OSEv3
- name: run OpenShift health checks
- roles:
- - openshift_health_checker
- post_tasks:
- # NOTE: we need to use the old "action: name" syntax until
- # https://github.com/ansible/ansible/issues/20513 is fixed.
- - action: openshift_health_check
- args:
- checks:
- - '@preflight'
+# This playbook has moved; this file remains so existing instructions keep working
+- include: ../openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index a21b6a0a5..a8c1c3a88 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,9 +1,14 @@
---
+- include: openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../common/openshift-cluster/std_include.yml
tags:
- always
- name: Gather Cluster facts
+ # Temporarily reverting to OSEv3 until group standardization is complete
hosts: OSEv3
roles:
- openshift_facts
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 8c6d77024..1b14ff32e 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,10 +1,11 @@
---
-- include: ../common/openshift-cluster/std_include.yml
+- include: openshift-cluster/initialize_groups.yml
tags:
- always
- name: Subscribe hosts, update repos and update OS packages
- hosts: l_oo_all_hosts
+ # Temporarily reverting to OSEv3 until group standardization is complete
+ hosts: OSEv3
roles:
- role: rhel_subscribe
when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry
new file mode 120000
index 000000000..9cf5334a1
--- /dev/null
+++ b/playbooks/certificate_expiry
@@ -0,0 +1 @@
+byo/openshift-checks/certificate_expiry/ \ No newline at end of file
diff --git a/playbooks/certificate_expiry/roles b/playbooks/certificate_expiry/roles
deleted file mode 120000
index b741aa3db..000000000
--- a/playbooks/certificate_expiry/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
new file mode 100644
index 000000000..533a35d9e
--- /dev/null
+++ b/playbooks/common/openshift-cfme/config.yml
@@ -0,0 +1,44 @@
+---
+# TODO: Make this work. The 'name' variable below is undefined
+# presently because it's part of the cfme role. This play can't run
+# until that's re-worked.
+#
+# - name: Pre-Pull manageiq-pods docker images
+# hosts: nodes
+# tasks:
+# - name: Ensure the latest manageiq-pods docker image is pulling
+# docker_image:
+# name: "{{ openshift_cfme_container_image }}"
+# # Fire-and-forget method, never timeout
+# async: 99999999999
+# # F-a-f, never check on this. True 'background' task.
+# poll: 0
+
+- name: Configure Masters for CFME Bulk Image Imports
+ hosts: oo_masters_to_config
+ serial: 1
+ tasks:
+ - name: Run master cfme tuning playbook
+ include_role:
+ name: openshift_cfme
+ tasks_from: tune_masters
+
+- name: Setup CFME
+ hosts: oo_first_master
+ vars:
+ r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_cfme_mktemp
+ changed_when: false
+ - name: Ensure the server template was read from disk
+ debug:
+ msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_cfme
+ vars:
+ template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-cfme/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/common/openshift-cfme/library
@@ -0,0 +1 @@
+../../../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-cfme/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..78b8e7668
--- /dev/null
+++ b/playbooks/common/openshift-cfme/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall CFME
+ hosts: masters
+ tasks:
+ - name: Run the CFME Uninstall Role Tasks
+ include_role:
+ name: openshift_cfme
+ tasks_from: uninstall
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
new file mode 100644
index 000000000..c7766ff04
--- /dev/null
+++ b/playbooks/common/openshift-checks/health.yml
@@ -0,0 +1,16 @@
+---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- name: Run OpenShift health checks
+ hosts: OSEv3
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "health"
+ post_tasks:
+ - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ args:
+ checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..7ca9f7e8b
--- /dev/null
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -0,0 +1,16 @@
+---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- hosts: OSEv3
+ name: run OpenShift pre-install checks
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "pre-install"
+ post_tasks:
+ - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ args:
+ checks: ['@preflight']
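
The `@preflight` group tag is not the only way to drive the checker; the same action also accepts an explicit list of check names. A minimal sketch, reusing check names that appear in the install playbook later in this diff (which checks exist depends on the openshift_health_checker role in use):

```yaml
---
# Sketch only: run a hand-picked subset of checks instead of a whole group.
- hosts: OSEv3
  name: run selected OpenShift checks
  roles:
  - openshift_health_checker
  post_tasks:
  - action: openshift_health_check  # https://github.com/ansible/ansible/issues/20513
    args:
      checks:
      - disk_availability
      - memory_availability
      - docker_storage
```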
diff --git a/playbooks/common/openshift-checks/roles b/playbooks/common/openshift-checks/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-checks/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1b967b7f1..7224ae712 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,34 +1,36 @@
---
-- name: Set oo_option facts
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+- include: initialize_oo_option_facts.yml
tags:
- always
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
- - set_fact:
- openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
- when: openshift_docker_selinux_enabled is not defined
-
-- include: disable_excluder.yml
+
+- name: Disable excluders
+ hosts: oo_masters_to_config:oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-etcd/config.yml
tags:
@@ -54,10 +56,26 @@
tags:
- node
+- include: ../openshift-glusterfs/config.yml
+ tags:
+ - glusterfs
+
- include: openshift_hosted.yml
tags:
- hosted
-- include: reset_excluder.yml
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
+ tags:
+ - servicecatalog
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config:oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
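
The disable play earlier in this file and the enable play here bracket the install; the `openshift_excluder` role is driven by `r_openshift_excluder_action` plus the service type. A sketch of calling it on its own, with the same parameters this hunk passes (it assumes OpenShift facts are already populated, as they are at this point of config.yml):

```yaml
---
# Sketch only: re-enable the excluders out of band.
- name: Re-enable excluders
  hosts: oo_masters_to_config:oo_nodes_to_config
  gather_facts: no
  roles:
  - role: openshift_excluder
    r_openshift_excluder_action: enable
    # openshift.common.service_type comes from previously gathered openshift facts
    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
```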
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
deleted file mode 100644
index f664c51c9..000000000
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
-
- # During installation the excluders are installed with present state.
- # So no pre-validation check here as the excluders are either to be installed (present = latest)
- # or they are not going to be updated if already installed
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: present
- docker_excluder_package_state: present
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 45a4875a3..baca72c58 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,29 +5,40 @@
become: no
gather_facts: no
tasks:
- - fail:
+ - name: Evaluate groups - g_etcd_hosts required
+ fail:
msg: This playbook requires g_etcd_hosts to be set
- when: "{{ g_etcd_hosts is not defined }}"
+ when: g_etcd_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+ fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
+ when: g_master_hosts is not defined or g_new_master_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+ fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
+ when: g_node_hosts is not defined or g_new_node_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_lb_hosts required
+ fail:
msg: This playbook requires g_lb_hosts to be set
- when: "{{ g_lb_hosts is not defined }}"
+ when: g_lb_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts required
+ fail:
msg: This playbook requires g_nfs_hosts to be set
- when: "{{ g_nfs_hosts is not defined }}"
+ when: g_nfs_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts is single host
+ fail:
msg: The nfs group must be limited to one host
- when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
+ when: (groups[g_nfs_hosts] | default([])) | length > 1
+
+ - name: Evaluate groups - g_glusterfs_hosts required
+ fail:
+ msg: This playbook requires g_glusterfs_hosts to be set
+ when: g_glusterfs_hosts is not defined
- name: Evaluate oo_all_hosts
add_host:
@@ -47,13 +58,13 @@
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
changed_when: no
- - name: Evaluate oo_etcd_to_config
+ - name: Evaluate oo_first_master
add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
+ when: g_master_hosts|length > 0
changed_when: no
- name: Evaluate oo_masters_to_config
@@ -65,6 +76,41 @@
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
changed_when: no
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
+ changed_when: no
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ when: g_etcd_hosts|length > 0
+ changed_when: no
+
+ # We use two groups: one for the hosts we're upgrading, which doesn't include embedded etcd,
+ # and one for backups, which does include the embedded etcd host. There's no need to
+ # upgrade embedded etcd; that just happens when the master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ changed_when: False
+
+ - name: Evaluate oo_etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ changed_when: False
+
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
@@ -82,40 +128,41 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
- when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
changed_when: no
- - name: Evaluate oo_first_etcd
+ - name: Evaluate oo_lb_to_config
add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
+ name: "{{ item }}"
+ groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: "{{ g_etcd_hosts|length > 0 }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_lb_hosts | default([]) }}"
changed_when: no
- - name: Evaluate oo_first_master
+ - name: Evaluate oo_nfs_to_config
add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
+ name: "{{ item }}"
+ groups: oo_nfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: "{{ g_master_hosts|length > 0 }}"
+ with_items: "{{ g_nfs_hosts | default([]) }}"
changed_when: no
- - name: Evaluate oo_lb_to_config
+ - name: Evaluate oo_glusterfs_to_config
add_host:
name: "{{ item }}"
- groups: oo_lb_to_config
+ groups: oo_glusterfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_lb_hosts | default([]) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
- - name: Evaluate oo_nfs_to_config
+ - name: Evaluate oo_etcd_to_migrate
add_host:
name: "{{ item }}"
- groups: oo_nfs_to_config
+ groups: oo_etcd_to_migrate
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_nfs_hosts | default([]) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
changed_when: no
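The group-evaluation hunks above all follow the same add_host pattern, and the etcd-related groups add a fallback: oo_etcd_hosts_to_backup and oo_etcd_to_migrate fall back to the first master when no dedicated etcd hosts exist (the embedded-etcd case). A minimal sketch of that fallback idiom, with a hypothetical group name:

    ---
    # Sketch only: oo_example_group is hypothetical; the with_items expression
    # mirrors the fallback used for oo_etcd_to_migrate above.
    - name: Evaluate oo_example_group
      hosts: localhost
      connection: local
      become: no
      gather_facts: no
      tasks:
        - name: Populate oo_example_group, falling back to the first master
          add_host:
            name: "{{ item }}"
            groups: oo_example_group
          with_items: "{{ groups.oo_etcd_to_config
                          if groups.oo_etcd_to_config | default([]) | length != 0
                          else groups.oo_first_master }}"
          changed_when: false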
diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
new file mode 100644
index 000000000..ac3c702a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
@@ -0,0 +1,27 @@
+---
+- name: Set oo_option facts
+ hosts: oo_all_hosts
+ tags:
+ - always
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+ - set_fact:
+ openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
+ when: openshift_docker_selinux_enabled is not defined
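Every task in this new file applies the same guard: set the fact from the oo_option lookup only when the variable is not already defined, so values supplied via inventory or -e always win. The guard, sketched for a single hypothetical option:

    ---
    # Sketch of the "only if not already defined" guard; example_option is
    # hypothetical, the real tasks cover the docker_* settings shown above.
    - hosts: oo_all_hosts
      tasks:
        - set_fact:
            openshift_example_option: "{{ lookup('oo_option', 'example_option') }}"
          # Skipped when the variable already exists, so user-supplied values
          # are never overwritten by the lookup.
          when: openshift_example_option is not defined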
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 07b38920f..f4e52869e 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,13 +1,14 @@
---
# NOTE: requires openshift_facts be run
- name: Verify compatible yum/subscription-manager combination
- hosts: l_oo_all_hosts
+ hosts: oo_all_hosts
gather_facts: no
tasks:
# See:
# https://bugzilla.redhat.com/show_bug.cgi?id=1395047
# https://bugzilla.redhat.com/show_bug.cgi?id=1282961
# https://github.com/openshift/openshift-ansible/issues/1138
+ # Consider the repoquery module for this work
- name: Check for bad combinations of yum and subscription-manager
command: >
{{ repoquery_cmd }} --installed --qf '%{version}' "yum"
@@ -16,7 +17,7 @@
when: not openshift.common.is_atomic | bool
- fail:
msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+ when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 5db71b857..ce7f981ab 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,6 +26,8 @@
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
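The new openshift_default_storage_class role entry is gated on the cloud provider: only AWS and GCE get a default StorageClass wired up here. A slightly more compact, equivalent form of that condition (a sketch, not what the patch uses):

    roles:
      - role: openshift_default_storage_class
        # Same effect as the explicit or-condition in the patch, written as a
        # membership test.
        when: openshift_cloudprovider_kind is defined and
              openshift_cloudprovider_kind in ['aws', 'gce']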
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index d96a78c4c..57580406c 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,11 +1,13 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
- openshift_logging
- name: Update Master configs
- hosts: masters:!oo_first_master
+ hosts: oo_masters:!oo_first_master
tasks:
- block:
- include_role:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 9f38ceea6..2c8ad5b75 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,5 +1,16 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- openshift_metrics
+
+- name: Update master configs for OpenShift Metrics
+ hosts: oo_masters:!oo_first_master
+ serial: 1
+ tasks:
+  - name: Set up the non-first masters' configs
+ include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config.yaml
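The added second play rolls the metrics master-config change across the remaining masters one host at a time (serial: 1) by reusing a single tasks file from the role via include_role/tasks_from instead of re-running the whole role. The same pattern, sketched with a placeholder role name:

    # Sketch of the rolling include_role/tasks_from pattern; some_role is a
    # placeholder for whichever role exposes an update_master_config.yaml tasks file.
    - name: Update master configs one host at a time
      hosts: oo_masters:!oo_first_master
      serial: 1
      tasks:
        - include_role:
            name: some_role
            tasks_from: update_master_config.yaml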
diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml
new file mode 100644
index 000000000..b1ca6f606
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_provisioners.yml
@@ -0,0 +1,5 @@
+---
+- name: OpenShift Provisioners
+ hosts: oo_first_master
+ roles:
+ - openshift_provisioners
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
new file mode 100644
index 000000000..6964e8567
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -0,0 +1,158 @@
+---
+- name: Check cert expiry
+ hosts: oo_etcd_to_config:oo_masters_to_config
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
+
+- name: Backup existing etcd CA certificate directories
+ hosts: oo_etcd_to_config
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+ - name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_ca
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_etcd_mktemp
+ changed_when: false
+
+- name: Distribute etcd CA to etcd hosts
+ hosts: oo_etcd_to_config
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+ - name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+ - name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+ - name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+ - copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
+
+- include: ../../openshift-etcd/restart.yml
+ # Do not restart etcd when etcd certificates were previously expired.
+ when: ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
+
+- name: Retrieve etcd CA certificate
+ hosts: oo_first_etcd
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+
+- name: Distribute etcd CA to masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Deploy etcd CA
+ copy:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
+ dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
+ when: groups.oo_etcd_to_config | default([]) | length > 0
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file:
+ name: "{{ g_etcd_mktemp.stdout }}"
+ state: absent
+ changed_when: false
+
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
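The pivotal step in this new playbook is the copy task that writes the new CA followed by the current CA into ca.crt on each etcd host, so members signed by either CA stay trusted while certificates are rotated. The bundle is just a concatenation; the same idea as a standalone task, with illustrative paths:

    # Illustrative only: build a transitional CA bundle (new CA first, old CA kept
    # for trust during the rotation window). Paths are examples, not the ones the
    # playbook derives from etcd_ca_dir/etcd_conf_dir.
    - name: Build transitional etcd CA bundle
      shell: cat /etc/etcd/ca/ca.crt /etc/etcd/ca.crt > /etc/etcd/ca-bundle.crt
      args:
        creates: /etc/etcd/ca-bundle.crt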
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
index 2963a5940..6b5c805e6 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
@@ -3,7 +3,8 @@
hosts: oo_first_etcd
any_errors_fatal: true
roles:
- - etcd_common
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
post_tasks:
- name: Determine if generated etcd certificates exist
stat:
@@ -27,7 +28,8 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - etcd_common
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
post_tasks:
- name: Backup etcd certificates
command: >
@@ -50,6 +52,7 @@
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Redeploy etcd client certificates for masters
hosts: oo_masters_to_config
@@ -63,4 +66,5 @@
etcd_cert_prefix: "master.etcd-"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_certificates_redeploy: true
+ - role: lib_utils
+ post_tasks:
+ - yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: servingInfo.namedCertificates
+ value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+ when:
+ - ('named_certificates' in openshift.master)
+ - openshift.master.named_certificates | default([]) | length > 0
+ - openshift_master_overwrite_named_certificates | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index cbb4a2434..089ae6bbc 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -6,131 +6,17 @@
msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
when: not openshift.common.version_gte_3_2_or_1_2 | bool
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- roles:
- - etcd_common
- tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
-
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
+- name: Check cert expiry
+ hosts: oo_nodes_to_config:oo_masters_to_config
vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_certificate_expiry_show_all: yes
roles:
- - etcd_common
- tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- roles:
- - etcd_common
- tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../../common/openshift-etcd/restart.yml
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -322,7 +208,17 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../../../common/openshift-master/restart.yml
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -371,4 +267,14 @@
state: absent
changed_when: false
-- include: ../../../common/openshift-node/restart.yml
+- include: ../../openshift-node/restart.yml
+ # Do not restart nodes when node certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index e82996cf4..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -66,7 +66,8 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
{% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index a7b614341..748bbbf91 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -67,7 +67,67 @@
service.alpha.openshift.io/serving-cert-secret-name=router-certs
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined
+
+ - block:
+ - assert:
+ that:
+ - "'certfile' in openshift_hosted_router_certificate"
+ - "'keyfile' in openshift_hosted_router_certificate"
+ - "'cafile' in openshift_hosted_router_certificate"
+ msg: |-
+ openshift_hosted_router_certificate has been set in the inventory but is
+ missing one or more required keys. Ensure that 'certfile', 'keyfile',
+ and 'cafile' keys have been specified for the openshift_hosted_router_certificate
+ inventory variable.
+
+ - name: Read router certificate and key
+ become: no
+ local_action:
+ module: slurp
+ src: "{{ item }}"
+ register: openshift_router_certificate_output
+ # Defaulting dictionary keys to none to avoid deprecation warnings
+ # (future fatal errors) during template evaluation. Dictionary keys
+ # won't be accessed unless openshift_hosted_router_certificate is
+ # defined and has all keys (certfile, keyfile, cafile) which we
+ # check above.
+ with_items:
+ - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
+
+ - name: Write temporary router certificate file
+ copy:
+ content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ mode: 0600
+
+ - name: Write temporary router key file
+ copy:
+ content: "{{ (openshift_router_certificate_output.results
+ | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ mode: 0600
+
+ - name: Replace router-certs secret
+ shell: >
+ {{ openshift.common.client_binary }} secrets new router-certs
+ tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ --type=kubernetes.io/tls
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --confirm
+ -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -
+
+ - name: Remove temporary router certificate and key files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined
- name: Redeploy router
command: >
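The new block only runs when openshift_hosted_router_certificate is defined, and the assert requires exactly the certfile, keyfile and cafile keys. A sketch of how the variable might be set in inventory vars (paths are hypothetical):

    # Hypothetical inventory values; the assert above requires exactly these keys.
    openshift_hosted_router_certificate:
      certfile: /path/to/wildcard.example.com.crt
      keyfile: /path/to/wildcard.example.com.key
      cafile: /path/to/intermediate-ca.crt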
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
deleted file mode 100644
index eaa8ce39c..000000000
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_excluder
- tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..6c12875fe
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,21 @@
+---
+- include: evaluate_groups.yml
+
+- name: Update Master configs
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - block:
+ - include_role:
+ name: openshift_service_catalog
+ tasks_from: wire_aggregator
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
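Because this new common playbook starts by including evaluate_groups.yml, an entry-point playbook only has to include it. A hypothetical thin wrapper (the real byo wrapper path may differ):

    ---
    # Hypothetical wrapper playbook; adjust the relative path to where it lives.
    - include: ../../common/openshift-cluster/service_catalog.yml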
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 74cc1d527..6ed31a644 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,28 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-
- include: evaluate_groups.yml
tags:
- always
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
deleted file mode 100644
index d1e431c5e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Record excluder state and disable
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include: pre/validate_excluder.yml
- vars:
- #repoquery_cmd: repoquery_cmd
- #openshift_upgrade_target: openshift_upgrade_target
- excluder: "{{ item }}"
- with_items:
- - "{{ openshift.common.service_type }}-docker-excluder"
- - "{{ openshift.common.service_type }}-excluder"
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: latest
- docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
new file mode 100644
index 000000000..800621857
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
new file mode 100644
index 000000000..a66301c0d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
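Both new stubs drive the openshift_excluder role purely through role parameters, replacing the removed include-based disable_excluder.yml. Calling them from an upgrade playbook is a pair of includes run with openshift_upgrade_target already set; assumed usage, not shown in this patch:

    # Assumed usage from an upgrade playbook's pre-upgrade section.
    - include: disable_master_excluders.yml
      tags:
        - pre_upgrade

    - include: disable_node_excluders.yml
      tags:
        - pre_upgrade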
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 304559f6e..02b8a9d3c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,4 +1,13 @@
---
+- include: ../../evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+
+- include: ../initialize_nodes_to_upgrade.yml
+
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
@@ -11,7 +20,7 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml
+ - include: upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -43,11 +52,15 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
- - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
+ - include: upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
index 8635eab0d..8635eab0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
+++ b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
index 1b418920f..13313377e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
service: name=docker state=restarted
+ register: l_docker_restart_docker_in_upgrade_result
+ until: not l_docker_restart_docker_in_upgrade_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -24,4 +28,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
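The restart now tolerates transient failures: register the result and loop with until/retries/delay instead of failing on the first error. The same idiom as a generic sketch, using the older `| failed` test this codebase relies on:

    # Generic retry idiom matching the style added above.
    - name: Restart a service, tolerating transient failures
      service:
        name: docker
        state: restarted
      register: l_restart_result
      until: not l_restart_result | failed
      retries: 3
      delay: 30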
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/common/openshift-cluster/upgrades/docker/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/docker/roles
+++ b/playbooks/common/openshift-cluster/upgrades/docker/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 17f8fc6e9..35d000e49 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -32,7 +32,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_pb_docker_upgrade_stop_result
+ until: not l_pb_docker_upgrade_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 7ef79afa9..616ba04f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,88 +1,14 @@
---
- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}"
+ hosts: oo_etcd_hosts_to_backup
roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- # For non containerized and non embedded we should have the correct version of
- # etcd installed already. So don't do anything.
- #
- # For embedded or containerized we need to use the latest because OCP 3.3 uses
- # a version of etcd that can only be backed up with etcd-3.x and if it's
- # containerized then etcd version may be newer than that on the host so
- # upgrade it.
- #
- # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
- # if you use package there, like this: "Could not find a module for unknown."
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
- #
- # TODO - We should refactor all containerized backups to use the containerized
- # version of etcd to perform the backup rather than relying on the host's
- # binaries. Until we do that we'll continue to have problems backing up etcd
- # when atomic host has an older version than the version that's running in the
- # container whether that's embedded or not
- - name: Install latest etcd for containerized or embedded
- package:
- name: etcd
- state: latest
- when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
-
- - name: Generate etcd backup
- command: >
- {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+    r_etcd_common_backup_tag: "{{ etcd_backup_tag | default('') }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -91,10 +17,10 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ | oo_select_keys(groups.oo_etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
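With the backup logic moved into the etcd_common role, what remains here is the generic gating idiom: each host sets a completion fact, the localhost play collects the hosts that set it (oo_select_keys/oo_collect are this repository's filter plugins), and the play fails listing any stragglers. Sketched with a hypothetical fact name:

    # Sketch of the gating idiom; my_step_complete is a hypothetical fact each
    # host would set when its work finishes.
    - name: Gate on step completion
      hosts: localhost
      connection: local
      become: no
      tasks:
        - set_fact:
            step_completed: "{{ hostvars
                                | oo_select_keys(groups.oo_etcd_hosts_to_backup)
                                | oo_collect('inventory_hostname', {'my_step_complete': true}) }}"
        - fail:
            msg: "These hosts did not finish: {{ groups.oo_etcd_hosts_to_backup | difference(step_completed) | join(',') }}"
          when: groups.oo_etcd_hosts_to_backup | difference(step_completed) | length > 0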
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
deleted file mode 100644
index 30232110e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# F23 GA'd with etcd 2.0, currently has 2.2 in updates
-# F24 GA'd with etcd-2.2, currently has 2.2 in updates
-# F25 Beta currently has etcd 3.0
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Update etcd
- package:
- name: "etcd"
- state: "latest"
-
-- name: Restart etcd
- service:
- name: etcd
- state: restarted
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
deleted file mode 120000
index 641e04e44..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1 +0,0 @@
-../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index fa86d29fb..64abc54e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -5,42 +5,19 @@
# mirrored packages on your own because only the GA and latest versions are
# available in the repos. So for Fedora we'll simply skip this, sorry.
-- include: ../../evaluate_groups.yml
- tags:
- - always
-
-# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
-# The other for backing up which includes the embedded etcd host, there's no need to
-# upgrade embedded etcd that just happens when the master is updated.
-- name: Evaluate additional groups for etcd
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
-
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
- backup_tag: "pre-upgrade-"
+ etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include: roles/etcd/tasks/etcdctl.yml
+ - include_role:
+ name: etcd_common
+ vars:
+ r_etcd_common_action: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
deleted file mode 100644
index 3a972e8ab..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Update etcd RPM
- package:
- name: etcd-{{ upgrade_version }}*
- state: latest
-
-- name: Restart etcd
- service:
- name: etcd
- state: restarted
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index a9b5b94e6..39e82498d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -1,119 +1,110 @@
---
- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
- when: not openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
-# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
-# through hosts, then loop through tasks only when appropriate
-- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
- serial: 1
+ - block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
+ when:
+ - not openshift.common.is_containerized | bool
+
+ - block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+    # Since a registered variable is set even when the `when` condition
+    # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+    # Since a registered variable is set even when the `when` condition
+    # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+ when:
+ - openshift.common.is_containerized | bool
+
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.1'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.1'
-- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.2'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.2'
-- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 2.2.5
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.2.5'
-- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.3'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.3'
-- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 2.3.7
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.3.7'
-- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '3.0'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '3.0'
-- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 3.0.15
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '3.0.15'
+
+- include: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.1'
+
+- include: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.1.3'
- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include: fedora_tasks.yml
- when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+ - include_role:
+ name: etcd_upgrade
+ when:
+ - ansible_distribution == 'Fedora'
+ - not openshift.common.is_containerized | bool
- name: Backup etcd
include: backup.yml
vars:
- backup_tag: "post-3.0-"
+ etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
new file mode 100644
index 000000000..831ca8f57
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -0,0 +1,17 @@
+---
+# INPUT etcd_upgrade_version
+# INPUT etcd_container_version
+# INPUT openshift.common.is_containerized
+- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ roles:
+ - role: etcd_upgrade
+ r_etcd_upgrade_action: upgrade
+ r_etcd_upgrade_mechanism: image
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
+ - openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
new file mode 100644
index 000000000..2e79451e0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -0,0 +1,18 @@
+---
+# INPUT etcd_upgrade_version
+# INPUT etcd_rpm_version
+# INPUT openshift.common.is_containerized
+- name: Upgrade to {{ etcd_upgrade_version }}
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ roles:
+ - role: etcd_upgrade
+ r_etcd_upgrade_action: upgrade
+ r_etcd_upgrade_mechanism: rpm
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
+ - ansible_distribution == 'RedHat'
+ - not openshift.common.is_containerized | bool
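Together with upgrade_image_members.yml, this file replaces the copy/pasted per-version plays: main.yml now includes the rpm or image variant with etcd_upgrade_version set, and the when conditions skip hosts already at or beyond that version. Adding a further hop is one more include; the version below is illustrative only:

    # Hypothetical extra step in main.yml; '3.2' is illustrative, not part of the patch.
    - include: upgrade_rpm_members.yml
      vars:
        etcd_upgrade_version: '3.2'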
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index bcbc4ee02..0f421928b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,28 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
-
- include: ../evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
@@ -30,48 +6,17 @@
g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
-- name: Set oo_options
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
+- include: ../initialize_oo_option_facts.yml
- include: ../initialize_facts.yml
-- name: Ensure clean repo cache in the event repos have been changed manually
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
- args:
- warn: no
-
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
tasks:
- name: Check if iptables is running
command: systemctl status iptables
- ignore_errors: true
changed_when: false
+ failed_when: false
register: service_iptables_status
- name: Set fact os_firewall_use_firewalld FALSE for iptables
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
- lib_openshift
tasks:
- - name: Retrieve list of openshift nodes matching upgrade label
- oc_obj:
- state: list
- kind: node
- selector: "{{ openshift_upgrade_nodes_label }}"
- register: nodes_to_upgrade
- when: openshift_upgrade_nodes_label is defined
+ - when: openshift_upgrade_nodes_label is defined
+ block:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
- # We got a list of nodes with the label, now we need to match these with inventory hosts
- # using their openshift.common.hostname fact.
- - name: Map labelled nodes to inventory hosts
- add_host:
- name: "{{ item }}"
- groups: temp_nodes_to_upgrade
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: " {{ groups['oo_nodes_to_config'] }}"
- when:
- - openshift_upgrade_nodes_label is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
- changed_when: false
+ - name: Fail if no nodes match openshift_upgrade_nodes_label
+ fail:
+ msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+ when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when:
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
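The reworked block still only runs when openshift_upgrade_nodes_label is defined, but it now fails fast when the selector matches no nodes instead of silently upgrading nothing. The label is an ordinary node selector supplied by the operator; the value below is hypothetical:

    # Hypothetical inventory or -e setting: restrict the upgrade to labelled nodes.
    openshift_upgrade_nodes_label: "upgrade=canary"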
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 673f11889..4eac8b067 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c00795a8d..d9ddf3860 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -5,9 +5,12 @@
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) |
+ replace ( '${version}', openshift_image_tag ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
+ replace ( '${version}', openshift_image_tag ) }}"
+ registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') |
+ replace ( '${version}', 'v' ~ openshift.common.short_version ) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -21,7 +24,10 @@
selector: 'router'
register: all_routers
- - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ - set_fact:
+ haproxy_routers: "{{ all_routers.results.results[0]['items'] |
+ oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+ oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
@@ -30,16 +36,15 @@
- all_routers.results.returncode != 0
- name: Update router image to current version
+ oc_edit:
+ kind: dc
+ name: "{{ item['labels']['deploymentconfig'] }}"
+ namespace: "{{ item['namespace'] }}"
+ content:
+ spec.template.spec.containers[0].image: "{{ router_image }}"
+ with_items: "{{ haproxy_routers }}"
when:
- all_routers.results.returncode == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
- --api-version=v1
- with_items: "{{ haproxy_routers }}"
- # AUDIT:changed_when_note: `false` not being set here. What we
- # need to do is check the current router image version and see if
- # this task needs to be ran.
- name: Check for default registry
oc_obj:
@@ -49,15 +54,34 @@
register: _default_registry
- name: Update registry image to current version
+ oc_edit:
+ kind: dc
+ name: docker-registry
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_image }}"
when:
- _default_registry.results.results[0] != {}
- command: >
- {{ oc_cmd }} patch dc/docker-registry -n default -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- --api-version=v1
- # AUDIT:changed_when_note: `false` not being set here. What we
- # need to do is check the current registry image version and see
- # if this task needs to be ran.
+
+ - name: Check for registry-console
+ oc_obj:
+ state: list
+ kind: dc
+ name: registry-console
+ register: _registry_console
+ when:
+ - openshift.common.deployment_type != 'origin'
+
+ - name: Update registry-console image to current version
+ oc_edit:
+ kind: dc
+ name: registry-console
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_console_image }}"
+ when:
+ - openshift.common.deployment_type != 'origin'
+ - _registry_console.results.results[0] != {}
roles:
- openshift_manageiq
@@ -95,6 +119,12 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- include: ../reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
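
Editor's note: for readers unfamiliar with the placeholders in `openshift.master.registry_url`, the `replace`/`regex_replace` filters in the vars above expand `${component}` and `${version}` per image. A hedged illustration with made-up values (the real ones come from inventory and gathered facts):

  # Suppose, for illustration only:
  #   openshift.master.registry_url  = "registry.example.com/openshift3/ose-${component}:${version}"
  #   openshift_image_tag            = "v3.5.5.5"
  #   openshift.common.short_version = "3.5"
  # Then the vars above evaluate roughly to:
  #   router_image           = registry.example.com/openshift3/ose-haproxy-router:v3.5.5.5
  #   registry_image         = registry.example.com/openshift3/ose-docker-registry:v3.5.5.5
  #   registry_console_image = registry.example.com/openshift3/registry-console:v3.5
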
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
deleted file mode 100644
index 6de1ed061..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# input variables:
-# - repoquery_cmd
-# - excluder
-# - openshift_upgrade_target
-- block:
- - name: Get available excluder version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
- register: excluder_version
- failed_when: false
- changed_when: false
-
- - name: Docker excluder version detected
- debug:
- msg: "{{ excluder }}: {{ excluder_version.stdout }}"
-
- - name: Printing upgrade target version
- debug:
- msg: "{{ openshift_upgrade_target }}"
-
- - name: Check the available {{ excluder }} version is at most of the upgrade target version
- fail:
- msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version"
- when:
- - "{{ excluder_version.stdout != '' }}"
- - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}"
- when:
- - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
index 7646e0fa6..9d8b73cff 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -1,23 +1,20 @@
---
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+# Only check if docker upgrade is required if docker_upgrade is not
+# already set to False.
+- include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
- # Additional checks for Atomic hosts:
+# Additional checks for Atomic hosts:
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
- - fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
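
Editor's note: the docker upgrade check condition above leans on operator precedence. In Jinja2, as in Python, `and` binds more tightly than `or`, so it is evaluated as in this explicitly parenthesised equivalent (a readability note only, not a behaviour change):

  - include: ../docker/upgrade_check.yml
    when: docker_upgrade is not defined or
          (docker_upgrade | bool and not (openshift.common.is_atomic | bool))
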
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index c83923dae..9b4a8e413 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,41 +1,43 @@
---
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
+- name: Fail when OpenShift is not installed
+ fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+- name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
+ repoquery:
+ name: "{{ openshift.common.service_type }}"
+ ignore_excluders: true
+ register: repoquery_out
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - not repoquery_out.results.package_found
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
+ - name: Set fact avail_openshift_version
+ set_fact:
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
- name: Verify OpenShift RPMs are available for upgrade
fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when:
+ - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
- - fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
+- name: Fail when OpenShift version does not meet minimum requirement for Origin upgrade
+ fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
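
Editor's note: one subtlety in the rewritten check above is that `default('0.0', True)` passes `True` as the filter's second (boolean) argument, so the fallback also applies when the variable is defined but empty, not only when it is undefined. A few hedged one-liners showing the behaviour (values are made up):

  - debug:
      msg: "{{ '' | default('0.0', true) }}"               # -> "0.0"  (empty string is replaced)
  - debug:
      msg: "{{ '3.6.173' | default('0.0', true) }}"        # -> "3.6.173"
  - debug:
      msg: "{{ '3.4.1' | version_compare('3.5', '<') }}"   # -> True
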
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 03ac02e9f..164baca81 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,27 +1,39 @@
---
-# We verified latest rpm available is suitable, so just yum update.
+# When we update package "a-${version}" and package a requires b >= ${version},
+# yum will choose the latest available version of b unless we pin it too, and
+# the whole dependency set ends up at the latest version. Since the package
+# module, unlike the yum module, doesn't flatten a list of packages into one
+# transaction, we need to do that explicitly. The Ansible core team tells us
+# not to rely on yum module transaction flattening anyway.
+
+# TODO: If the sdn package isn't already installed, this will install it; we
+# should fix that.
-# Master package upgrade ends up depending on node and sdn packages, we need to be explicit
-# with all versions to avoid yum from accidentally jumping to something newer than intended:
- name: Upgrade master packages
- package: name={{ item }} state=present
- when: component == "master"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ package: name={{ master_pkgs | join(',') }} state=present
+ vars:
+ master_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+    - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "master"
+ - not openshift.common.is_atomic | bool
- name: Upgrade node packages
- package: name={{ item }} state=present
- when: component == "node"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
-
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+ package: name={{ node_pkgs | join(',') }} state=present
+ vars:
+ node_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "node"
+ - not openshift.common.is_atomic | bool
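
Editor's note: the comment block at the top of this file explains why the package lists are now joined into a single `name=` argument instead of looping: one call gives yum a single transaction in which all of the pinned versions are resolved together. A condensed sketch of the two forms, with placeholder package names:

  # Looped form: each item is its own transaction, so dependencies of the first
  # package can be pulled in at their latest version before the pinned second
  # package is even considered.
  - package: name={{ item }} state=present
    with_items:
      - example-master-3.5.5.5
      - example-node-3.5.5.5

  # Flattened form (used above): one transaction, every pin resolved together.
  - package: name=example-master-3.5.5.5,example-node-3.5.5.5 state=present
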
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c6e799261..6a0471948 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,17 +2,22 @@
###############################################################################
# Upgrade Masters
###############################################################################
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
+
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+ hosts: oo_first_master
tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
+ - name: Upgrade all storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool
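
Editor's note: when `failed_when` is given a list, as in the storage migration task just added, the entries are ANDed, so the step only fails the run if it is enabled, exited non-zero, and the `*_fatal` toggle is true. The toggles are ordinary inventory variables; a hedged sketch of setting them (the file placement is illustrative):

  # e.g. in group_vars/OSEv3.yml, or any other inventory variable location:
  openshift_upgrade_pre_storage_migration_enabled: true   # run `oc adm migrate storage` before the master upgrade
  openshift_upgrade_pre_storage_migration_fatal: true     # abort the upgrade if that migration fails
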
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -118,8 +123,8 @@
yedit:
src: "{{ openshift.common.config_base }}/master/master-config.yaml"
key: 'imageConfig.format'
- value: "{{ oreg_url }}"
- when: oreg_url is defined
+ value: "{{ oreg_url | default(oreg_url_master) }}"
+ when: oreg_url is defined or oreg_url_master is defined
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -141,6 +146,19 @@
- include: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
+ - name: Post master upgrade - Upgrade clusterpolicies storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=clusterpolicies --confirm
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+ run_once: true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
- set_fact:
master_update_complete: True
@@ -216,13 +234,25 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
- reconcile_scc_result.rc == 0
run_once: true
+ - name: Migrate storage post policy reconciliation
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ run_once: true
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+
- set_fact:
reconcile_complete: True
@@ -258,8 +288,8 @@
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -281,13 +311,18 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_control_plane_drain_result
+ until: not l_upgrade_control_plane_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
post_tasks:
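
Editor's note: the drain task above gains a retry loop, `until: not ... | failed` with `retries: 60` and `delay: 60`, which keeps re-issuing the drain for up to roughly an hour while pods are evicted. A minimal sketch of the same until/retries/delay pattern around a placeholder command:

  - name: Retry a long-running operation until it succeeds
    command: /usr/local/bin/long-running-operation   # placeholder for the real drain command
    register: l_operation_result
    until: not l_operation_result | failed
    retries: 60   # up to 60 attempts...
    delay: 60     # ...60 seconds apart (about an hour in total)
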
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e9f894942..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,7 +4,7 @@
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -26,14 +26,22 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
- name: Set node schedulability
@@ -46,7 +54,3 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
-
-- include: ../reset_excluder.yml
- tags:
- - always
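
Editor's note: as the comment in this play says, `openshift_upgrade_nodes_serial`, and after this change `openshift_upgrade_nodes_max_fail_percentage`, must be passed with `-e` because they are evaluated before per-host inventory variables. A hedged invocation sketch (inventory and playbook paths are placeholders):

  ansible-playbook -i <inventory> <byo upgrade_nodes playbook> \
      -e openshift_upgrade_nodes_serial="20%" \
      -e openshift_upgrade_nodes_max_fail_percentage=10
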
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 88f2ddc78..83d2cec81 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -63,12 +63,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+ when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
- debug:
msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+ when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+ when: openshift_master_scheduler_predicates | default(none) is not none
# Handle cases where openshift_master_predicates is not defined
- block:
@@ -87,7 +87,7 @@
when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+ when: openshift_master_scheduler_predicates | default(none) is none
# Upgrade priorities
@@ -120,12 +120,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+ when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
- debug:
msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+ when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+ when: openshift_master_scheduler_priorities | default(none) is not none
# Handle cases where openshift_master_priorities is not defined
- block:
@@ -144,7 +144,7 @@
when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+ when: openshift_master_scheduler_priorities | default(none) is none
# Update scheduler
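
Editor's note: the changes in this file all strip the `{{ }}` delimiters from `when:` expressions. Conditionals are templated implicitly, and wrapping them in Jinja2 delimiters makes recent Ansible releases emit the "conditional statements should not include jinja2 templating delimiters" warning. A one-line before/after using a variable from this file:

  # before: when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
  # after:  when: openshift_master_scheduler_predicates | default(none) is not none
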
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 68c71a132..d69472fad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -53,7 +53,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..f1245aa2e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,117 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
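
Editor's note: the `initialize_openshift_version.yml` include above pins `openshift_release` to the upgrade target and turns off `openshift_protect_installed_version`; as its comment says, `openshift_image_tag` and `openshift_pkg_version` are still respected if an exact build is needed. A hedged inventory sketch with illustrative values only:

  # Illustrative inventory variables; real values depend on the channel being upgraded to.
  openshift_release: "3.3"             # release stream requested for this upgrade
  openshift_image_tag: "v3.3.1.46"     # optional: pin containerized hosts to an exact image tag
  openshift_pkg_version: "-3.3.1.46"   # optional: pin RPM hosts (leading dash, since it is concatenated onto the package name)
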
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..b693ab55c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,117 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..4fd029107
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,112 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index 43c2ffcd4..ed89dbe8d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -3,7 +3,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..965e39482
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,115 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..7830f462c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,117 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..4364ff8e3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,110 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..4e7c14e94
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,117 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..45b664d06
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,121 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
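
The master_config_hook passed above (the v3_6 control plane playbook later in this diff does the same) points at a version-specific task file; the shared ../upgrade_control_plane.yml presumably consumes it along these lines (that playbook is not part of this diff):

    # sketch: how a master_config_hook variable is typically consumed
    - include: "{{ master_config_hook }}"
      when: master_config_hook is defined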
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..036d3fcf5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,110 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
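
The modify_yaml tasks above use the in-repo modify_yaml module (dest, yaml_key, yaml_value); the two tasks with an empty yaml_value appear to null out the deprecated pluginOrderOverride and kubernetesMasterConfig.admissionConfig keys during the 3.6 config migration. A minimal sketch of the same interface setting an explicit value; the key and value are hypothetical and only illustrate the call shape:

    # sketch: modify_yaml with an explicit value (hypothetical key and value)
    - modify_yaml:
        dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
        yaml_key: 'exampleSection.exampleKey'
        yaml_value: 'example-value'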
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..5b9ac9e8f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,117 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..a470c7595
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,121 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..25eceaf90
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,110 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
index ac5704f69..78c1767b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
@@ -7,4 +7,6 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
- tasks: []
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 48cc03b19..33fc5630f 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -13,4 +13,6 @@
pause:
prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when: lookupip.stdout not in ansible_all_ipv4_addresses
+ when:
+ - lookupip.stdout != '127.0.0.1'
+ - lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 1b8106e0e..2cb6197d1 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -7,4 +7,5 @@
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..3e7a48669
--- /dev/null
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -0,0 +1,120 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- name: Run pre-checks
+ hosts: oo_etcd_to_migrate
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: check
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- include: ../openshift-cluster/initialize_facts.yml
+ tags:
+ - always
+
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master' }}"
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ when:
+ - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: migrate
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+
+- name: Configure masters if etcd data migration is successful
+ hosts: oo_masters_to_config
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: configure
+ when: etcd_migration_failed | length == 0
+ tasks:
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+
+- name: Start masters after etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+    # Sometimes the master-api or master-controllers service fails to start on the first attempt
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
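
The backup and migration gates above share one pattern: collect the hosts that set a per-host success fact, subtract them from the full group, and fail if anything remains. A generic sketch of that pattern, with a hypothetical r_some_step_complete fact standing in for r_etcd_common_backup_complete / r_etcd_migrate_success:

    # sketch: generic gate on a per-host completion fact (r_some_step_complete is hypothetical)
    - set_fact:
        step_completed: "{{ hostvars
                            | oo_select_keys(groups.oo_etcd_to_migrate)
                            | oo_collect('inventory_hostname', {'r_some_step_complete': true}) }}"
    - set_fact:
        step_failed: "{{ groups.oo_etcd_to_migrate | difference(step_completed) }}"
    - fail:
        msg: "The following hosts did not complete the step: {{ step_failed | join(',') }}"
      when: step_failed | length > 0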
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index 196c86f28..af1ef245a 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -5,5 +5,5 @@
tasks:
- name: restart etcd
service:
- name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..edc15a3f2
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -0,0 +1,40 @@
+---
+- name: Open firewall ports for GlusterFS nodes
+ hosts: glusterfs
+ vars:
+ os_firewall_allow:
+ - service: glusterfs_sshd
+ port: "2222/tcp"
+ - service: glusterfs_daemon
+ port: "24007/tcp"
+ - service: glusterfs_management
+ port: "24008/tcp"
+ - service: glusterfs_bricks
+ port: "49152-49251/tcp"
+ roles:
+ - role: os_firewall
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
+
+- name: Open firewall ports for GlusterFS registry nodes
+ hosts: glusterfs_registry
+ vars:
+ os_firewall_allow:
+ - service: glusterfs_sshd
+ port: "2222/tcp"
+ - service: glusterfs_daemon
+ port: "24007/tcp"
+ - service: glusterfs_management
+ port: "24008/tcp"
+ - service: glusterfs_bricks
+ port: "49152-49251/tcp"
+ roles:
+ - role: os_firewall
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
+
+- name: Configure GlusterFS
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..80cf7529e
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/registry.yml
@@ -0,0 +1,49 @@
+---
+- include: config.yml
+
+- name: Initialize GlusterFS registry PV and PVC vars
+ hosts: oo_first_master
+ tags: hosted
+ tasks:
+ - set_fact:
+ glusterfs_pv: []
+ glusterfs_pvc: []
+
+ - set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
+ path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Create persistent volumes
+ hosts: oo_first_master
+ tags:
+ - hosted
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ tags:
+ - hosted
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ roles:
+ - role: openshift_hosted
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index c414913bf..2dacc1218 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -12,5 +12,6 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 60cf56108..5de03951c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -69,7 +69,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master session secrets and config
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -79,6 +79,24 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+      l_etcd3_enabled: "{{ (etcd3_grep.rc == 0) | bool }}"
- name: Generate master session secrets
hosts: oo_first_master
@@ -109,6 +127,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
roles:
- role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -117,10 +138,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
+ r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
- role: nuage_master
when: openshift.common.use_nuage | bool
- role: calico_master
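
The l_etcd3_enabled fact above is derived by grepping master-config.yaml for a storage-backend entry followed by etcd3; a sketch of the kind of fragment that pattern matches (shape inferred from the grep expression, surrounding keys may differ):

    # sketch: master-config.yaml fragment matched by the storage-backend grep above
    kubernetesMasterConfig:
      apiServerArguments:
        storage-backend:
        - etcd3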
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index b35368bf1..6fec346c3 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -1,5 +1,5 @@
---
-- include: ../../common/openshift-master/validate_restart.yml
+- include: validate_restart.yml
- name: Restart masters
hosts: oo_masters_to_config
@@ -12,8 +12,8 @@
roles:
- openshift_facts
post_tasks:
- - include: ../../common/openshift-master/restart_hosts.yml
+ - include: restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
- - include: ../../common/openshift-master/restart_services.yml
+ - include: restart_services.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 67ba0aa2e..a5dbe0590 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -37,3 +37,4 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 508b5a3ac..a844fb369 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -15,6 +15,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: openshift_master_ha | bool
- name: Restart master controllers
service:
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 92f16dc47..bc61ee9bb 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -51,7 +51,7 @@
changed_when: false
- name: Configure docker hosts
- hosts: oo_masters_to-config:oo_nodes_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_config
vars:
docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
@@ -60,9 +60,15 @@
- openshift_facts
- openshift_docker
-- include: ../openshift-cluster/disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-master/config.yml
@@ -70,6 +76,12 @@
- include: ../openshift-node/config.yml
-- include: ../openshift-cluster/reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 792ffb4e2..501ba4273 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -32,7 +32,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
- name: Configure containerized nodes
@@ -47,8 +47,7 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
roles:
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -64,8 +63,6 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
new file mode 100644
index 000000000..b3a7399dc
--- /dev/null
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -0,0 +1,28 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Install and configure NetworkManager
+ hosts: oo_all_hosts
+ become: yes
+ tasks:
+ - name: install NetworkManager
+ package:
+ name: 'NetworkManager'
+ state: present
+
+ - name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
+
+ - name: enable and start NetworkManager
+ service:
+ name: 'NetworkManager'
+ state: started
+ enabled: yes
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 441b100e9..ed2473a43 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
service:
name: docker
state: restarted
+ register: l_docker_restart_docker_in_node_result
+ until: not l_docker_restart_docker_in_node_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -36,6 +40,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
- name: restart node
@@ -51,7 +56,7 @@
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_config
- until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+ until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
# Give the node two minutes to come back online.
retries: 24
delay: 5
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index c31aca62b..40da8990d 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -27,12 +27,24 @@
- openshift_facts
- openshift_docker
-- include: ../openshift-cluster/disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-node/config.yml
-- include: ../openshift-cluster/reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index 74e2420db..05a58db73 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index 74e2420db..05a58db73 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index f782d6dab..569e00da2 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -3,6 +3,8 @@
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+- include: ../../common/openshift-cluster/std_include.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -35,4 +37,3 @@
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
- openshift_use_dnsmasq: false
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 78581fdfe..4df86effa 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -14,7 +14,7 @@
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
- when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
+ when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
register: downloaded_image
- name: Uncompress xz compressed base cloud image
@@ -49,11 +49,15 @@
- '{{ instances }}'
- [ user-data, meta-data ]
+- name: Check for genisoimage
+  command: which genisoimage
+  register: which_genisoimage
+  failed_when: false
+  changed_when: false
+
- name: Create the cloud-init config drive
- command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
args:
- chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
+ creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
with_items: '{{ instances }}'
- name: Refresh the libvirt storage pool for openshift
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 98434439c..505f7b3a8 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([]
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 20ce47c07..82329eac1 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -340,16 +340,6 @@ resources:
port_range_max: 10250
remote_mode: remote_group_id
- direction: ingress
- protocol: tcp
- port_range_min: 10255
- port_range_max: 10255
- remote_mode: remote_group_id
- - direction: ingress
- protocol: udp
- port_range_min: 10255
- port_range_max: 10255
- remote_mode: remote_group_id
- - direction: ingress
protocol: udp
port_range_min: 4789
port_range_max: 4789
diff --git a/requirements.txt b/requirements.txt
index 241313b6f..bf95b4ff9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,10 @@
-ansible>=2.2
-click
-pyOpenSSL
+# Versions are pinned to prevent pypi releases arbitrarily breaking
+# tests with new APIs/semantics. We want to update versions deliberately.
+ansible==2.3.1.0
+boto==2.34.0
+click==6.7
+pyOpenSSL==16.2.0
# We need to disable ruamel.yaml for now because of test failures
#ruamel.yaml
-six
+six==1.10.0
+passlib==1.6.5
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
new file mode 100644
index 000000000..12929b354
--- /dev/null
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+
+ansible_service_broker_remove: false
+ansible_service_broker_log_level: info
+ansible_service_broker_output_request: false
+ansible_service_broker_recovery: true
+ansible_service_broker_bootstrap_on_startup: true
+# Recommended you do not enable this for now
+ansible_service_broker_dev_broker: false
+ansible_service_broker_launch_apb_on_bind: false
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
new file mode 100644
index 000000000..ec4aafb79
--- /dev/null
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Fabian von Feilitzsch
+ description: OpenShift Ansible Service Broker
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
new file mode 100644
index 000000000..b3797ef96
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -0,0 +1,280 @@
+---
+
+# Fact setting and validations
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set ansible_service_broker facts
+ set_fact:
+ ansible_service_broker_image_prefix: "{{ ansible_service_broker_image_prefix | default(__ansible_service_broker_image_prefix) }}"
+ ansible_service_broker_image_tag: "{{ ansible_service_broker_image_tag | default(__ansible_service_broker_image_tag) }}"
+
+ ansible_service_broker_etcd_image_prefix: "{{ ansible_service_broker_etcd_image_prefix | default(__ansible_service_broker_etcd_image_prefix) }}"
+ ansible_service_broker_etcd_image_tag: "{{ ansible_service_broker_etcd_image_tag | default(__ansible_service_broker_etcd_image_tag) }}"
+ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}"
+
+ ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
+ ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
+ ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
+ ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
+ ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
+
+- name: set ansible-service-broker image facts using set prefix and tag
+ set_fact:
+ ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
+ ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+
+- include: validate_facts.yml
+
+
+# Deployment of ansible-service-broker starts here
+- name: create openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+
+- name: Set SA cluster-role
+ oc_adm_policy_user:
+ state: present
+ namespace: "openshift-ansible-service-broker"
+ resource_kind: cluster-role
+ resource_name: admin
+ user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: create ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ ports:
+ - name: port-1338
+ port: 1338
+ selector:
+ app: openshift-ansible-service-broker
+ service: asb
+
+- name: create etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ ports:
+ - name: etcd-advertise
+ port: 2379
+ selector:
+ app: openshift-ansible-service-broker
+ service: etcd
+
+- name: create route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: present
+ service_name: asb
+ port: 1338
+ register: asb_route_out
+
+- name: get ansible-service-broker route name
+ set_fact:
+ ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}"
+
+- name: create persistent volume claim for etcd
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: PersistentVolumeClaim
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+
+- name: create etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: openshift-ansible-service-broker
+ service: etcd
+ spec:
+ selector:
+ matchLabels:
+ app: openshift-ansible-service-broker
+ service: etcd
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: openshift-ansible-service-broker
+ service: etcd
+ spec:
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_etcd_image }}"
+ name: etcd
+ imagePullPolicy: IfNotPresent
+ terminationMessagePath: /tmp/termination-log
+ workingDir: /etcd
+ args:
+ - '{{ ansible_service_broker_etcd_image_etcd_path }}'
+ - --data-dir=/data
+ - "--listen-client-urls=http://0.0.0.0:2379"
+ - "--advertise-client-urls=http://0.0.0.0:2379"
+ ports:
+ - containerPort: 2379
+ protocol: TCP
+ env:
+ - name: ETCDCTL_API
+ value: "3"
+ volumeMounts:
+ - mountPath: /data
+ name: etcd
+ volumes:
+ - name: etcd
+ persistentVolumeClaim:
+ claimName: etcd
+
+- name: create ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ strategy:
+ type: Recreate
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ serviceAccount: asb
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_image }}"
+ name: asb
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/ansible-service-broker
+ ports:
+ - containerPort: 1338
+ protocol: TCP
+ env:
+ - name: BROKER_CONFIG
+ value: /etc/ansible-service-broker/config.yaml
+ terminationMessagePath: /tmp/termination-log
+ volumes:
+ - name: config-volume
+ configMap:
+ name: broker-config
+ items:
+ - key: broker-config
+ path: config.yaml
+
+
+# TODO: saw an oc_configmap in the library, but didn't understand how to get it to do the following:
+- name: Create config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: ConfigMap
+ content:
+ path: /tmp/cmout
+ data:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ data:
+ broker-config: |
+ registry:
+ name: "{{ ansible_service_broker_registry_type }}"
+ url: "{{ ansible_service_broker_registry_url }}"
+ user: "{{ ansible_service_broker_registry_user }}"
+ pass: "{{ ansible_service_broker_registry_password }}"
+ org: "{{ ansible_service_broker_registry_organization }}"
+ dao:
+ etcd_host: etcd
+ etcd_port: 2379
+ log:
+ logfile: /var/log/ansible-service-broker/asb.log
+ stdout: true
+ level: "{{ ansible_service_broker_log_level }}"
+ color: true
+ openshift: {}
+ broker:
+ dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }}
+ launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }}
+ recovery: {{ ansible_service_broker_recovery | bool | lower }}
+ output_request: {{ ansible_service_broker_output_request | bool | lower }}
+ bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }}
+
+- name: Create the Broker resource in the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: present
+ kind: Broker
+ content:
+ path: /tmp/brokerout
+ data:
+ apiVersion: servicecatalog.k8s.io/v1alpha1
+ kind: Broker
+ metadata:
+ name: ansible-service-broker
+ spec:
+ url: http://asb.openshift-ansible-service-broker.svc:1338
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
new file mode 100644
index 000000000..b46ce8233
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not ansible_service_broker_remove|default(false) | bool
+
+- include: remove.yml
+ when: ansible_service_broker_remove|default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..2519f9f4c
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -0,0 +1,65 @@
+---
+
+- name: remove openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove persistent volume claim for etcd
+ oc_pvc:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+- name: remove ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+# TODO: saw an oc_configmap in the library, but didn't understand how to get it to do the following:
+- name: remove config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: ConfigMap
+
+# TODO: Is this going to work?
+- name: remove broker object from the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: absent
+ kind: Broker
diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml
new file mode 100644
index 000000000..604d24e1d
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/validate_facts.yml
@@ -0,0 +1,15 @@
+---
+- name: validate Dockerhub registry settings
+ fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user. ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters"
+ when:
+ - ansible_service_broker_registry_type == 'dockerhub'
+ - not (ansible_service_broker_registry_user and
+ ansible_service_broker_registry_password and
+ ansible_service_broker_registry_organization)
+
+
+- name: validate RHCC registry settings
+ fail: msg="To use the Red Hat Container Catalog registry, you must provide the ansible_service_broker_registry_url"
+ when:
+ - ansible_service_broker_registry_type == 'rhcc'
+ - not ansible_service_broker_registry_url
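As an example, an inventory that selects the Docker Hub registry has to supply all three of the credentials checked above; the values below are placeholders:

    ansible_service_broker_registry_type: dockerhub
    ansible_service_broker_registry_user: my-user
    ansible_service_broker_registry_password: my-password
    ansible_service_broker_registry_organization: my-apb-org
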
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..15e448515
--- /dev/null
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -0,0 +1,14 @@
+---
+
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
+__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd
+
+__ansible_service_broker_registry_type: dockerhub
+__ansible_service_broker_registry_url: null
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..0b3a2a69d
--- /dev/null
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,14 @@
+---
+
+__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: rhel7/
+__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /bin/etcd
+
+__ansible_service_broker_registry_type: rhcc
+__ansible_service_broker_registry_url: "https://registry.access.redhat.com"
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/calico/README.md b/roles/calico/README.md
index 99e870521..9b9458bfa 100644
--- a/roles/calico/README.md
+++ b/roles/calico/README.md
@@ -20,6 +20,15 @@ To install, set the following inventory configuration parameters:
* `openshift_use_openshift_sdn=False`
* `os_sdn_network_plugin_name='cni'`
+## Additional Calico/Node and Felix Configuration Options
+
+Additional parameters that can be defined in the inventory are:
+
+| Environment | Description | Schema | Default |
+|---------|----------------------|---------|---------|
+| CALICO_IPV4POOL_CIDR | The IPv4 pool to create if none exists at startup. It is invalid to define both this variable and NO_DEFAULT_POOLS. | IPv4 CIDR | 192.168.0.0/16 |
+| CALICO_IPV4POOL_IPIP | IPIP mode to use for the IPv4 pool created at startup. | off, always, cross-subnet | always |
+| CALICO_LOG_DIR | Directory on the host machine where Calico logs are written. | String | /var/log/calico |
### Contact Information
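The table lists the container environment variables; in this change they are surfaced through the role defaults below, which can be overridden from the inventory or group_vars. Values shown are illustrative only:

    calico_ipv4pool_cidr: "10.20.0.0/16"
    calico_ipv4pool_ipip: "cross-subnet"
    calico_log_dir: "/var/log/calico"
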
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index a81fc3af7..207dee068 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -1,10 +1,15 @@
---
kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshift.common.hostname }}.kubeconfig"
-etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
+cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz"
-calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
-calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
-calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam"
+
+calico_ipv4pool_ipip: "always"
+calico_ipv4pool_cidr: "192.168.0.0/16"
+
+calico_log_dir: "/var/log/calico"
+calico_node_image: "calico/node:v1.2.1"
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 65d75cf00..67fc0065f 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -5,4 +5,10 @@
- name: restart docker
become: yes
- systemd: name=docker state=restarted
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
+ register: l_docker_restart_docker_in_calico_result
+ until: not l_docker_restart_docker_in_calico_result | failed
+ retries: 3
+ delay: 30
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 287fed321..e62378532 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -1,5 +1,12 @@
---
-- include: ../../../roles/etcd_client_certificates/tasks/main.yml
+- name: Calico Node | Error if invalid cert arguments
+ fail:
+ msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
+ when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
+
+- name: Calico Node | Generate OpenShift-etcd certs
+ include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
vars:
etcd_cert_prefix: calico.etcd-
etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
@@ -7,20 +14,39 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
-- name: Assure the calico certs have been generated
+- name: Calico Node | Set etcd cert location facts
+ when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
+ set_fact:
+ calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
+ calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
+ calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
+ calico_etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
+ calico_etcd_cert_dir: "/etc/origin/calico/"
+
+- name: Calico Node | Error if no certs set.
+ fail:
+ msg: "Invalid etcd configuration for calico."
+ when: item == ''
+ with_items:
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file }}"
+ - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_endpoints }}"
+
+- name: Calico Node | Assure the calico certs are present
stat:
path: "{{ item }}"
with_items:
- - "{{ calico_etcd_ca_cert_file }}"
- - "{{ calico_etcd_cert_file}}"
- - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file}}"
+ - "{{ calico_etcd_key_file }}"
-- name: Configure Calico service unit file
+- name: Calico Node | Configure Calico service unit file
template:
dest: "/lib/systemd/system/calico.service"
src: calico.service.j2
-- name: Enable calico
+- name: Calico Node | Enable calico
become: yes
systemd:
name: calico
@@ -29,46 +55,46 @@
enabled: yes
register: start_result
-- name: Assure CNI conf dir exists
+- name: Calico Node | Assure CNI conf dir exists
become: yes
file: path="{{ cni_conf_dir }}" state=directory
-- name: Generate Calico CNI config
+- name: Calico Node | Generate Calico CNI config
become: yes
template:
- src: "calico.conf.j2"
+ src: "10-calico.conf.j2"
dest: "{{ cni_conf_dir }}/10-calico.conf"
-- name: Assures Kuberentes CNI bin dir exists
+- name: Calico Node | Assure Kubernetes CNI bin dir exists
become: yes
file: path="{{ cni_bin_dir }}" state=directory
-- name: Download Calico CNI Plugin
+- name: Calico Node | Download Calico CNI Plugin
become: yes
get_url:
- url: https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico
+ url: "{{ calico_url_cni }}"
dest: "{{ cni_bin_dir }}"
mode: a+x
-- name: Download Calico IPAM Plugin
+- name: Calico Node | Download Calico IPAM Plugin
become: yes
get_url:
- url: https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam
+ url: "{{ calico_url_ipam }}"
dest: "{{ cni_bin_dir }}"
mode: a+x
-- name: Download and unzip standard CNI plugins
+- name: Calico Node | Download and extract standard CNI plugins
become: yes
unarchive:
remote_src: True
- src: https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz
+ src: "{{ cni_url }}"
dest: "{{ cni_bin_dir }}"
-- name: Assure Calico conf dir exists
+- name: Calico Node | Assure Calico conf dir exists
become: yes
file: path=/etc/calico/ state=directory
-- name: Set calicoctl.cfg
+- name: Calico Node | Set calicoctl.cfg
template:
- src: calico.cfg.j2
+ src: calicoctl.cfg.j2
dest: "/etc/calico/calicoctl.cfg"
diff --git a/roles/calico/templates/calico.conf.j2 b/roles/calico/templates/10-calico.conf.j2
index 3c8c6b046..1ec569cff 100644
--- a/roles/calico/templates/calico.conf.j2
+++ b/roles/calico/templates/10-calico.conf.j2
@@ -4,7 +4,7 @@
"ipam": {
"type": "calico-ipam"
},
- "etcd_endpoints": "{{ etcd_endpoints }}",
+ "etcd_endpoints": "{{ calico_etcd_endpoints }}",
"etcd_key_file": "{{ calico_etcd_key_file }}",
"etcd_cert_file": "{{ calico_etcd_cert_file }}",
"etcd_ca_cert_file": "{{ calico_etcd_ca_cert_file }}",
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index b882a5597..302c5f34e 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -1,7 +1,7 @@
[Unit]
Description=calico
-After=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
[Service]
Restart=always
@@ -10,18 +10,20 @@ ExecStart=/usr/bin/docker run --net=host --privileged \
--name=calico-node \
-e WAIT_FOR_DATASTORE=true \
-e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
- -e CALICO_IPV4POOL_IPIP=always \
+ -e CALICO_IPV4POOL_IPIP={{ calico_ipv4pool_ipip }} \
+ -e CALICO_IPV4POOL_CIDR={{ calico_ipv4pool_cidr }} \
-e FELIX_IPV6SUPPORT=false \
- -e ETCD_ENDPOINTS={{ etcd_endpoints }} \
- -v /etc/origin/calico:/etc/origin/calico \
+ -e ETCD_ENDPOINTS={{ calico_etcd_endpoints }} \
+ -v {{ calico_etcd_cert_dir }}:{{ calico_etcd_cert_dir }} \
-e ETCD_CA_CERT_FILE={{ calico_etcd_ca_cert_file }} \
-e ETCD_CERT_FILE={{ calico_etcd_cert_file }} \
-e ETCD_KEY_FILE={{ calico_etcd_key_file }} \
-e NODENAME={{ openshift.common.hostname }} \
- -v /var/log/calico:/var/log/calico \
+ -v {{ calico_log_dir }}:/var/log/calico \
-v /lib/modules:/lib/modules \
-v /var/run/calico:/var/run/calico \
- calico/node:v1.1.0
+ {{ calico_node_image }}
+
ExecStop=-/usr/bin/docker stop calico-node
diff --git a/roles/calico/templates/calico.cfg.j2 b/roles/calico/templates/calicoctl.cfg.j2
index 722385ed8..a00ea27dc 100644
--- a/roles/calico/templates/calico.cfg.j2
+++ b/roles/calico/templates/calicoctl.cfg.j2
@@ -3,7 +3,7 @@ kind: calicoApiConfig
metadata:
spec:
datastoreType: "etcdv2"
- etcdEndpoints: "{{ etcd_endpoints }}"
+ etcdEndpoints: "{{ calico_etcd_endpoints }}"
etcdKeyFile: "{{ calico_etcd_key_file }}"
etcdCertFile: "{{ calico_etcd_cert_file }}"
etcdCaCertFile: "{{ calico_etcd_ca_cert_file }}"
diff --git a/roles/calico_master/README.md b/roles/calico_master/README.md
index 2d34a967c..6f5ed0664 100644
--- a/roles/calico_master/README.md
+++ b/roles/calico_master/README.md
@@ -21,6 +21,18 @@ To install, set the following inventory configuration parameters:
* `os_sdn_network_plugin_name='cni'`
+
+## Additional Calico/Node and Felix Configuration Options
+
+Additional parameters that can be defined in the inventory are:
+
+
+| Environment | Description | Schema | Default |
+|---------|----------------------|---------|---------|
+| CALICO_IPV4POOL_CIDR | The IPv4 pool to create if none exists at startup. It is invalid to define both this variable and NO_DEFAULT_POOLS. | IPv4 CIDR | 192.168.0.0/16 |
+| CALICO_IPV4POOL_IPIP | IPIP mode to use for the IPv4 pool created at startup. | off, always, cross-subnet | always |
+| CALICO_LOG_DIR | Directory on the host machine where Calico logs are written. | String | /var/log/calico |
+
### Contact Information
Author: Dan Osborne <dan@projectcalico.org>
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index db0d17884..b2df0105f 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -1,2 +1,7 @@
---
kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
+
+calicoctl_bin_dir: "/usr/local/bin/"
+
+calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
+calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4"
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 3358abe23..8ddca26d6 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Assure the calico certs have been generated
+- name: Calico Master | Assure the calico certs have been generated
stat:
path: "{{ item }}"
with_items:
@@ -7,17 +7,17 @@
- "{{ calico_etcd_cert_file}}"
- "{{ calico_etcd_key_file }}"
-- name: Create temp directory for policy controller definition
+- name: Calico Master | Create temp directory for policy controller definition
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
register: mktemp
changed_when: False
-- name: Write Calico Policy Controller definition
+- name: Calico Master | Write Calico Policy Controller definition
template:
dest: "{{ mktemp.stdout }}/calico-policy-controller.yml"
src: calico-policy-controller.yml.j2
-- name: Launch Calico Policy Controller
+- name: Calico Master | Launch Calico Policy Controller
command: >
{{ openshift.common.client_binary }} create
-f {{ mktemp.stdout }}/calico-policy-controller.yml
@@ -26,16 +26,23 @@
failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
changed_when: ('created' in calico_create_output.stdout)
-- name: Delete temp directory
+- name: Calico Master | Delete temp directory
file:
name: "{{ mktemp.stdout }}"
state: absent
changed_when: False
-- name: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico
+- name: Calico Master | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico
oc_adm_policy_user:
user: system:serviceaccount:kube-system:calico
resource_kind: scc
resource_name: privileged
state: present
+
+- name: Download Calicoctl
+ become: yes
+ get_url:
+ url: "{{ calico_url_calicoctl }}"
+ dest: "{{ calicoctl_bin_dir }}"
+ mode: a+x
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 66c334ceb..811884473 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -74,11 +74,11 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.5.3
+ image: {{ calico_url_policy_controller }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
- value: {{ etcd_endpoints }}
+ value: {{ calico_etcd_endpoints }}
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
value: {{ calico_etcd_ca_cert_file }}
@@ -96,10 +96,10 @@ spec:
volumeMounts:
# Mount in the etcd TLS secrets.
- name: certs
- mountPath: /etc/origin/calico
+ mountPath: {{ calico_etcd_cert_dir }}
volumes:
# Mount in the etcd TLS secrets.
- name: certs
hostPath:
- path: /etc/origin/calico
+ path: {{ calico_etcd_cert_dir }}
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 8bd68787a..0114498f8 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -1,13 +1,16 @@
---
- block:
- - name: Create passthrough route for docker-registry
+
+ # When openshift_hosted_manage_registry=true the openshift_hosted
+ # role will create the appropriate route for the docker-registry.
+ # When openshift_hosted_manage_registry=false then this code will
+ # not be run.
+ - name: fetch the docker-registry route
oc_route:
kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: docker-registry
namespace: default
- service_name: docker-registry
- state: present
- tls_termination: passthrough
+ state: list
register: docker_registry_route
- name: Create passthrough route for registry-console
@@ -41,7 +44,7 @@
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
- -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
-p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
--config={{ openshift_hosted_kubeconfig }}
-n default
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index bddad778f..57f49ea11 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -3,7 +3,7 @@
package: name={{ item }} state=present
with_items:
- cockpit-ws
- - cockpit-shell
+ - cockpit-system
- cockpit-bridge
- cockpit-docker
- "{{ cockpit_plugins }}"
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index 1ccae61f2..8c4d19537 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,12 +1,12 @@
---
# The version of Contiv binaries to use
-contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC
+contiv_version: 1.0.1
# The version of cni binaries
cni_version: v0.4.0
-contiv_default_subnet: "20.1.1.1/24"
-contiv_default_gw: "20.1.1.254"
+contiv_default_subnet: "10.128.0.0/16"
+contiv_default_gw: "10.128.254.254"
# TCP port that Netmaster listens for network connections
netmaster_port: 9999
@@ -69,6 +69,9 @@ netplugin_fwd_mode: bridge
# Contiv fabric mode aci|default
contiv_fabric_mode: default
+# Global VLAN range
+contiv_vlan_range: "2900-3000"
+
# Encapsulation type vlan|vxlan to use for instantiating container networks
contiv_encap_mode: vlan
@@ -78,8 +81,8 @@ netplugin_driver: ovs
# Create a default Contiv network for use by pods
contiv_default_network: true
-# VLAN/ VXLAN tag value to be used for the default network
-contiv_default_network_tag: 1
+# Statically configured tag for default network (if needed)
+contiv_default_network_tag: ""
#SRFIXME (use the openshift variables)
https_proxy: ""
@@ -95,6 +98,9 @@ apic_leaf_nodes: ""
apic_phys_dom: ""
apic_contracts_unrestricted_mode: no
apic_epg_bridge_domain: not_specified
+apic_configure_default_policy: false
+apic_default_external_contract: "uni/tn-common/brc-default"
+apic_default_app_profile: "contiv-infra-app-profile"
is_atomic: False
kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
master_name: "{{ groups['masters'][0] }}"
@@ -104,3 +110,12 @@ kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
kube_key: "{{ kube_cert_dir }}/admin.key"
kube_cert: "{{ kube_cert_dir }}/admin.crt"
kube_master_api_port: 8443
+
+# contivh1 default subnet and gateway
+#contiv_h1_subnet_default: "132.1.1.0/24"
+#contiv_h1_gw_default: "132.1.1.1"
+contiv_h1_subnet_default: "10.129.0.0/16"
+contiv_h1_gw_default: "10.129.0.1"
+
+# contiv default private subnet for ext access
+contiv_private_ext_subnet: "10.130.0.0/16"
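These are role defaults with the lowest variable precedence, so deployments whose existing address space collides with the 10.128.x/10.129.x/10.130.x ranges can override them from the inventory or group_vars. A sketch with illustrative values:

    contiv_default_subnet: "172.30.0.0/16"
    contiv_default_gw: "172.30.254.254"
    contiv_h1_subnet_default: "172.31.0.0/16"
    contiv_h1_gw_default: "172.31.0.1"
    contiv_private_ext_subnet: "192.168.200.0/24"
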
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index 3223afb6e..da6409f1e 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -26,3 +26,5 @@ dependencies:
etcd_url_scheme: http
etcd_peer_url_scheme: http
when: contiv_role == "netmaster"
+- role: contiv_auth_proxy
+ when: (contiv_role == "netmaster") and (contiv_enable_auth_proxy == true)
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index 9cf98bb80..f679443e0 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -6,10 +6,53 @@
retries: 9
delay: 10
+- name: Contiv | Set globals
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+
+- name: Contiv | Set arp mode to flood if ACI
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
+ when: contiv_fabric_mode == "aci"
+
- name: Contiv | Check if default-net exists
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
register: net_result
- name: Contiv | Create default-net
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway={{ contiv_default_gw }} default-net'
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
+
+- name: Contiv | Create host access infra network for VxLan routing case
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
+ when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+
+#- name: Contiv | Create an allow-all policy for the default-group
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
+# when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Set up aci external contract to consume default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
+ when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+- name: Contiv | Set up aci external contract to provide default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
+ when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+- name: Contiv | Create aci default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
+ when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Add external contracts to the default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
+ when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+#- name: Contiv | Add policy rule 1 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
+# when: contiv_fabric_mode == "aci"
+
+#- name: Contiv | Add policy rule 2 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
+# when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Create default aci app profile
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
+ when: contiv_fabric_mode == "aci"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index 5057767b8..acaf7386e 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -23,7 +23,7 @@
line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
state: present
when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
- with_items: groups['masters']
+ with_items: "{{ groups['masters'] }}"
- name: Netmaster | Create netmaster symlinks
file:
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index 2d0fb95ae..07bb16ea7 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -2,7 +2,7 @@
- name: Netmaster IPtables | Get iptables rules
command: iptables -L --wait
register: iptablesrules
- always_run: yes
+ check_mode: no
- name: Netmaster IPtables | Enable iptables at boot
service:
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 97b9762df..e861a2591 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -105,9 +105,13 @@
- name: Docker | Restart docker
service:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: docker_updated|changed
+ register: l_docker_restart_docker_in_contiv_result
+ until: not l_docker_restart_docker_in_contiv_result | failed
+ retries: 3
+ delay: 30
- name: Netplugin | Enable Netplugin
service:
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 8c348ac67..3ea34645d 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -2,7 +2,7 @@
- name: Netplugin IPtables | Get iptables rules
command: iptables -L --wait
register: iptablesrules
- always_run: yes
+ check_mode: no
- name: Netplugin IPtables | Enable iptables at boot
service:
@@ -23,7 +23,36 @@
notify: Save iptables rules
- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "vxlan"
+ command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472"
+ when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1
+ notify: Save iptables rules
- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "vxlan"
+ command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789"
+ when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow from contivh0
+ command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input"
+ when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow to contivh0
+ command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output"
+ when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow from contivh1
+ command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input"
+ when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow to contivh1
+ command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output"
+ when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow dns
+ command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns"
+ when: iptablesrules.stdout.find("contiv dns") == -1
+ notify: Save iptables rules
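The tasks above shell out to /sbin/iptables and guard each rule on a string match against `iptables -L` output. For comparison, a sketch of the DNS rule written with Ansible's iptables module, which performs its own present/absent check; this is an alternative approach, not what this change implements:

    - name: Netplugin IPtables | Allow dns
      iptables:
        chain: INPUT
        protocol: udp
        destination_port: "53"
        jump: ACCEPT
        comment: "contiv dns"
        action: insert
      notify: Save iptables rules
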
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index 2eff1b85f..e0d48e643 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -4,9 +4,10 @@
did_install: false
- include: pkgMgrInstallers/centos-install.yml
- when: ansible_distribution == "CentOS" and not is_atomic
+ when: (ansible_os_family == "RedHat") and
+ not is_atomic
- name: Package Manager | Set fact saying we did CentOS package install
set_fact:
did_install: true
- when: ansible_distribution == "CentOS"
+ when: (ansible_os_family == "RedHat")
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 51c3d35ac..91e6aadf3 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -1,13 +1,13 @@
---
-- name: PkgMgr CentOS | Install net-tools pkg for route
+- name: PkgMgr RHEL/CentOS | Install net-tools pkg for route
yum:
pkg=net-tools
state=latest
-- name: PkgMgr CentOS | Get openstack kilo rpm
+- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
get_url:
- url: https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-2.noarch.rpm
- dest: /tmp/rdo-release-kilo-2.noarch.rpm
+ url: https://repos.fedorapeople.org/repos/openstack/openstack-ocata/rdo-release-ocata-2.noarch.rpm
+ dest: /tmp/rdo-release-ocata-2.noarch.rpm
validate_certs: False
environment:
http_proxy: "{{ http_proxy|default('') }}"
@@ -16,15 +16,15 @@
tags:
- ovs_install
-- name: PkgMgr CentOS | Install openstack kilo rpm
- yum: name=/tmp/rdo-release-kilo-2.noarch.rpm state=present
+- name: PkgMgr RHEL/CentOS | Install openstack ocata rpm
+ yum: name=/tmp/rdo-release-ocata-2.noarch.rpm state=present
tags:
- ovs_install
-- name: PkgMgr CentOS | Install ovs
+- name: PkgMgr RHEL/CentOS | Install ovs
yum:
- pkg=openvswitch
- state=latest
+ pkg=openvswitch-2.5.0-2.el7.x86_64
+ state=present
environment:
http_proxy: "{{ http_proxy|default('') }}"
https_proxy: "{{ https_proxy|default('') }}"
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 8e4b66fbe..4506d2231 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,6 +1,6 @@
[Unit]
Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service
[Service]
ExecStart={{ bin_dir }}/aci_gw.sh start
diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2
index f3d26c037..a4928cc3d 100644
--- a/roles/contiv/templates/netplugin.j2
+++ b/roles/contiv/templates/netplugin.j2
@@ -1,9 +1,7 @@
{% if contiv_encap_mode == "vlan" %}
NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
{% endif %}
-{# Note: Commenting out vxlan encap mode support until it is fully supported
{% if contiv_encap_mode == "vxlan" %}
-NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -e {{contiv_encap_mode}} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
{% endif %}
-#}
diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md
new file mode 100644
index 000000000..287b6c148
--- /dev/null
+++ b/roles/contiv_auth_proxy/README.md
@@ -0,0 +1,29 @@
+Role Name
+=========
+
+Role to install Contiv API Proxy and UI
+
+Requirements
+------------
+
+Docker needs to be installed to run the auth proxy container.
+
+Role Variables
+--------------
+
+auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container.
+auth_proxy_cert and auth_proxy_key specify the files to use for the proxy server certificates.
+auth_proxy_port is the host port, and auth_proxy_datastore is the cluster data store address.
+
+Dependencies
+------------
+
+docker
+
+Example Playbook
+----------------
+
+- hosts: netplugin-node
+ become: true
+ roles:
+ - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 }
diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml
new file mode 100644
index 000000000..4e637a947
--- /dev/null
+++ b/roles/contiv_auth_proxy/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
+auth_proxy_port: 10000
+contiv_certs: "/var/contiv/certs"
+cluster_store: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
+auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
+auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
+auth_proxy_datastore: "{{ cluster_store }}"
+auth_proxy_binaries: "/var/contiv_cache"
+auth_proxy_local_install: False
+auth_proxy_rule_comment: "Contiv auth proxy service"
diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service
new file mode 100644
index 000000000..7cd2edff1
--- /dev/null
+++ b/roles/contiv_auth_proxy/files/auth-proxy.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Contiv Proxy and UI
+After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+
+[Service]
+ExecStart=/usr/bin/auth_proxy.sh start
+ExecStop=/usr/bin/auth_proxy.sh stop
+KillMode=control-group
+Restart=on-failure
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml
new file mode 100644
index 000000000..9cb9bea49
--- /dev/null
+++ b/roles/contiv_auth_proxy/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for auth_proxy
diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml
new file mode 100644
index 000000000..a29659cc9
--- /dev/null
+++ b/roles/contiv_auth_proxy/tasks/cleanup.yml
@@ -0,0 +1,10 @@
+---
+
+- name: stop auth-proxy container
+ service: name=auth-proxy state=stopped
+
+- name: cleanup iptables for auth proxy
+ shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
+ become: true
+ with_items:
+ - "{{ auth_proxy_port }}"
diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml
new file mode 100644
index 000000000..74e7bf794
--- /dev/null
+++ b/roles/contiv_auth_proxy/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+# tasks file for auth_proxy
+- name: setup iptables for auth proxy
+ shell: >
+ ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \
+ iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
+ become: true
+ with_items:
+ - "{{ auth_proxy_port }}"
+
+# Load the auth-proxy-image from a local tar. Ignore any errors to handle the
+# case where the image has not been built locally.
+- name: copy auth-proxy image
+ copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar
+ when: auth_proxy_local_install == True
+
+- name: load auth-proxy image
+ shell: docker load -i /tmp/auth-proxy-image.tar
+ when: auth_proxy_local_install == True
+
+- name: create cert folder for proxy
+ file: path=/var/contiv/certs state=directory
+
+- name: copy shell script for starting auth-proxy
+ template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx
+
+- name: copy cert for starting auth-proxy
+ copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r
+
+- name: copy key for starting auth-proxy
+ copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r
+
+- name: copy systemd units for auth-proxy
+ copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service
+
+- name: start auth-proxy container
+ systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes
diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2
new file mode 100644
index 000000000..e82e5b4ab
--- /dev/null
+++ b/roles/contiv_auth_proxy/templates/auth_proxy.j2
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+usage="$0 start/stop"
+if [ $# -ne 1 ]; then
+ echo USAGE: $usage
+ exit 1
+fi
+
+case $1 in
+start)
+ set -e
+
+ /usr/bin/docker run --rm \
+ -p 10000:{{ auth_proxy_port }} \
+ --net=host --name=auth-proxy \
+ -e NO_NETMASTER_STARTUP_CHECK=1 \
+ -v /var/contiv:/var/contiv \
+ {{ auth_proxy_image }} \
+ --tls-key-file={{ auth_proxy_key }} \
+ --tls-certificate={{ auth_proxy_cert }} \
+ --data-store-address={{ auth_proxy_datastore }} \
+ --netmaster-address={{ service_vip }}:9999 \
+ --listen-address=:10000
+ ;;
+
+stop)
+ # don't stop on error
+ /usr/bin/docker stop auth-proxy
+ /usr/bin/docker rm -f -v auth-proxy
+ ;;
+
+*)
+ echo USAGE: $usage
+ exit 1
+ ;;
+esac
diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory
new file mode 100644
index 000000000..d18580b3c
--- /dev/null
+++ b/roles/contiv_auth_proxy/tests/inventory
@@ -0,0 +1 @@
+localhost \ No newline at end of file
diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml
new file mode 100644
index 000000000..2af3250cd
--- /dev/null
+++ b/roles/contiv_auth_proxy/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - auth_proxy
diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml
new file mode 100644
index 000000000..9032766c4
--- /dev/null
+++ b/roles/contiv_auth_proxy/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for auth_proxy
diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml
index a6c08fa63..7b8150954 100644
--- a/roles/contiv_facts/defaults/main.yaml
+++ b/roles/contiv_facts/defaults/main.yaml
@@ -8,3 +8,6 @@ bin_dir: /usr/bin
ansible_temp_dir: /tmp/.ansible/files
source_type: packageManager
+
+# Whether or not to also install and enable the Contiv auth_proxy
+contiv_enable_auth_proxy: false
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 926e0e0be..7a4972fca 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -3,7 +3,7 @@
stat: path=/run/ostree-booted
register: s
changed_when: false
- always_run: yes
+ check_mode: no
- name: Init the is_atomic fact
set_fact:
@@ -17,7 +17,7 @@
- name: Determine if CoreOS
raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
register: distro
- always_run: yes
+ check_mode: no
- name: Init the is_coreos fact
set_fact:
@@ -61,7 +61,7 @@
stat: path=/usr/bin/rpm
register: s
changed_when: false
- always_run: yes
+ check_mode: no
- name: Init the has_rpm fact
set_fact:
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index d2f66dac5..07401a6dd 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -4,7 +4,7 @@
register: s
changed_when: false
failed_when: false
- always_run: yes
+ check_mode: no
- name: Set the has_firewalld fact
set_fact:
@@ -16,7 +16,7 @@
register: s
changed_when: false
failed_when: false
- always_run: yes
+ check_mode: no
- name: Set the has_iptables fact
set_fact:
diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2
index 566739f25..6e0a7a640 100644
--- a/roles/dns/templates/named.service.j2
+++ b/roles/dns/templates/named.service.j2
@@ -1,7 +1,7 @@
[Unit]
-Requires=docker.service
-After=docker.service
-PartOf=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
Type=simple
@@ -12,4 +12,4 @@ ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v /
ExecStop=/usr/bin/docker stop bind
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/docker/README.md b/roles/docker/README.md
index ea06fd41a..19908c036 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -1,7 +1,9 @@
Docker
=========
-Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
+Ensures the docker package or system container is installed, and optionally raises the timeout for systemd-udevd.service to 5 minutes.
+
+container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
Requirements
------------
@@ -11,8 +13,10 @@ Ansible 2.2
Role Variables
--------------
-udevw_udevd_dir: location of systemd config for systemd-udevd.service
+docker_conf_dir: location of the Docker configuration directory
+docker_systemd_dir: location of the systemd directory for Docker
docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446)
+udevw_udevd_dir: location of systemd config for systemd-udevd.service
Dependencies
------------
@@ -26,6 +30,7 @@ Example Playbook
roles:
- role: docker
docker_udev_workaround: "true"
+ docker_use_system_container: False
License
-------
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 9ccb306fc..591367467 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -2,8 +2,12 @@
- name: restart docker
systemd:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
+ register: r_docker_restart_docker_result
+ until: not r_docker_restart_docker_result | failed
+ retries: 3
+ delay: 30
when: not docker_service_status_changed | default(false) | bool
- name: restart udev
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index ad28cece9..cd4083572 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: os_firewall
+- role: lib_openshift
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index c34700aeb..0c2b16acf 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,119 +1,17 @@
---
-- name: Get current installed Docker version
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
- when: not openshift.common.is_atomic | bool
- register: curr_docker_version
- changed_when: false
-
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
-
-# Make sure Docker is installed, but does not update a running version.
-# Docker upgrades are handled by a separate playbook.
-- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
-
-- block:
- # Extend the default Docker service unit file when using iptables-services
- - name: Ensure docker.service.d directory exists
- file:
- path: "{{ docker_systemd_dir }}"
- state: directory
-
- - name: Configure Docker service unit file
- template:
- dest: "{{ docker_systemd_dir }}/custom.conf"
- src: custom.conf.j2
- when: not os_firewall_use_firewalld | default(True) | bool
+# These tasks dispatch to the proper set of docker tasks based on the
+# inventory:openshift_docker_use_system_container variable
- include: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
-- stat: path=/etc/sysconfig/docker
- register: docker_check
-
-- name: Set registry params
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
- with_items:
- - reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
- reg_flag: --add-registry
- - reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
- reg_flag: --block-registry
- - reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
- reg_flag: --insecure-registry
- notify:
- - restart docker
-
-- name: Set Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
- state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
- with_items:
- - reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
- - reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
- - reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') }}"
- notify:
- - restart docker
- when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
-
-- name: Set various Docker options
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^OPTIONS=.*$'
- line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
- {% if docker_options is defined %} {{ docker_options }}{% endif %}\
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
- when: docker_check.stat.isreg is defined and docker_check.stat.isreg
- notify:
- - restart docker
-
-- name: Start the Docker service
- systemd:
- name: docker
- enabled: yes
- state: started
- daemon_reload: yes
- register: start_result
-
- set_fact:
- docker_service_status_changed: start_result | changed
+ l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}"
+
+- name: Use Package Docker if Requested
+ include: package_docker.yml
+ when: not l_use_system_container
-- meta: flush_handlers
+- name: Use System Container Docker if Requested
+ include: systemcontainer_docker.yml
+ when: l_use_system_container
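The dispatch is controlled from the inventory via the variable named in the comment above; a minimal sketch of opting into the system-container install (the file location shown is an assumption):

    # group_vars/OSEv3.yml
    openshift_docker_use_system_container: true
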
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
new file mode 100644
index 000000000..bc52ab60c
--- /dev/null
+++ b/roles/docker/tasks/package_docker.yml
@@ -0,0 +1,134 @@
+---
+- name: Get current installed Docker version
+ command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ when: not openshift.common.is_atomic | bool
+ register: curr_docker_version
+ changed_when: false
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+
+# Make sure Docker is installed, but does not update a running version.
+# Docker upgrades are handled by a separate playbook.
+- name: Install Docker
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ when: not openshift.common.is_atomic | bool
+
+- block:
+ # Extend the default Docker service unit file when using iptables-services
+ - name: Ensure docker.service.d directory exists
+ file:
+ path: "{{ docker_systemd_dir }}"
+ state: directory
+
+ - name: Configure Docker service unit file
+ template:
+ dest: "{{ docker_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
+ when: not os_firewall_use_firewalld | default(False) | bool
+
+- stat: path=/etc/sysconfig/docker
+ register: docker_check
+
+- name: Set registry params
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+ when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ with_items:
+ - reg_conf_var: ADD_REGISTRY
+ reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
+ reg_flag: --add-registry
+ - reg_conf_var: BLOCK_REGISTRY
+ reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
+ reg_flag: --block-registry
+ - reg_conf_var: INSECURE_REGISTRY
+ reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
+ reg_flag: --insecure-registry
+ notify:
+ - restart docker
+
+- name: Set Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
+ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
+ with_items:
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') }}"
+ notify:
+ - restart docker
+ when:
+ - docker_check.stat.isreg is defined
+ - docker_check.stat.isreg
+ - '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+
+- name: Set various Docker options
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^OPTIONS=.*$'
+ line: "OPTIONS='\
+ {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\
+ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
+ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
+ {% if docker_options is defined %} {{ docker_options }}{% endif %}\
+ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+ when: docker_check.stat.isreg is defined and docker_check.stat.isreg
+ notify:
+ - restart docker
+
+- stat: path=/etc/sysconfig/docker-network
+ register: sysconfig_docker_network_check
+
+- name: Configure Docker Network OPTIONS
+ lineinfile:
+ dest: /etc/sysconfig/docker-network
+ regexp: '^DOCKER_NETWORK_OPTIONS=.*$'
+ line: "DOCKER_NETWORK_OPTIONS='\
+ {% if openshift.node is defined and openshift.node.sdn_mtu is defined %} --mtu={{ openshift.node.sdn_mtu }}{% endif %}'"
+ when:
+ - sysconfig_docker_network_check.stat.isreg is defined
+ - sysconfig_docker_network_check.stat.isreg
+ notify:
+ - restart docker
+
+- name: Start the Docker service
+ systemd:
+ name: docker
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: r_docker_package_docker_start_result
+ until: not r_docker_package_docker_start_result | failed
+ retries: 3
+ delay: 30
+
+- set_fact:
+ docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
+
+- meta: flush_handlers
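For reference, a sketch of the role-level variables these tasks consume (in a full install they are normally fed from the corresponding openshift_docker_* inventory options); the values are illustrative only:

    docker_additional_registries:
    - registry.example.com:5000
    docker_insecure_registries:
    - registry.example.com:5000
    docker_log_driver: json-file
    docker_log_options: "max-size=50m,max-file=3"
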
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
new file mode 100644
index 000000000..57a84bc2c
--- /dev/null
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -0,0 +1,176 @@
+---
+# If docker_options are provided we should fail. We should not install docker and ignore
+# the user's configuration. NOTE: docker_options == inventory:openshift_docker_options
+- name: Fail quickly if openshift_docker_options are set
+ assert:
+ that:
+ - docker_options is not defined or docker_options == ''
+ msg: |
+ Docker via System Container does not allow for the use of the openshift_docker_options
+ variable. If you want to use openshift_docker_options you will need to use the
+ traditional docker package install. Otherwise, comment out openshift_docker_options
+ in your inventory file.
+
+- name: Ensure container-selinux is installed
+ package:
+ name: container-selinux
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# Make sure Docker is installed so we are able to use the client
+- name: Install Docker so we can use the client
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ when: not openshift.common.is_atomic | bool
+
+# Make sure docker is disabled. Errors are ignored.
+- name: Disable Docker
+ systemd:
+ name: docker
+ enabled: no
+ state: stopped
+ daemon_reload: yes
+ ignore_errors: True
+ register: r_docker_systemcontainer_docker_stop_result
+ until: not r_docker_systemcontainer_docker_stop_result | failed
+ retries: 3
+ delay: 30
+
+
+# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
+# regexp: the line starts with or without #, followed by the string
+# http_proxy, then either : or =
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?http_proxy[:=]{1}"
+ line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?https_proxy[:=]{1}"
+ line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ - name: Add no_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?no_proxy[:=]{1}"
+ line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
+ when:
+ - openshift.common.no_proxy is defined
+ - openshift.common.no_proxy != ''
+
+- block:
+
+ - name: Set default image prepend
+ set_fact:
+ l_docker_image_prepend: "gscrivano"
+
+ - name: Use Red Hat Registry for image when distribution is Red Hat
+ set_fact:
+ l_docker_image_prepend: "registry.access.redhat.com/openshift3"
+ when: ansible_distribution == 'RedHat'
+
+ - name: Use Fedora Registry for image when distribution is Fedora
+ set_fact:
+ l_docker_image_prepend: "registry.fedoraproject.org/f25"
+ when: ansible_distribution == 'Fedora'
+
+ # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
+ - name: Use a testing registry if requested
+ set_fact:
+ l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_registry_override is defined
+ - openshift_docker_systemcontainer_image_registry_override != ""
+
+ - name: Set the full image name
+ set_fact:
+ l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
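+ # Illustrative result (a sketch): on RHEL, assuming openshift.docker.service_name
+ # is 'container-engine', l_docker_image becomes
+ # registry.access.redhat.com/openshift3/container-engine:latest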
+
+# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
+- name: Pre-pull Container Engine System Container image
+ command: "atomic pull --storage ostree {{ l_docker_image }}"
+ changed_when: false
+ environment:
+ NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
+
+
+- name: Ensure container-engine.service.d directory exists
+ file:
+ path: "{{ container_engine_systemd_dir }}"
+ state: directory
+
+- name: Ensure /etc/docker directory exists
+ file:
+ path: "{{ docker_conf_dir }}"
+ state: directory
+
+- name: Install Container Engine System Container
+ oc_atomic_container:
+ name: "{{ openshift.docker.service_name }}"
+ image: "{{ l_docker_image }}"
+ state: latest
+
+- name: Configure Container Engine Service File
+ template:
+ dest: "{{ container_engine_systemd_dir }}/custom.conf"
+ src: systemcontainercustom.conf.j2
+
+# Set local versions of facts that must be in json format for container-daemon.json
+# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
+- set_fact:
+ l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+ l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
+ l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
+ l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+ l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
+
+# Configure container-engine using the container-daemon.json file
+# NOTE: daemon.json and container-daemon.json have been separated to avoid
+# collision.
+- name: Configure Container Engine
+ template:
+ dest: "{{ docker_conf_dir }}/container-daemon.json"
+ src: daemon.json
+
+# Enable and start the container-engine service
+- name: Start the Container Engine service
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: r_docker_systemcontainer_docker_start_result
+ until: not r_docker_systemcontainer_docker_start_result | failed
+ retries: 3
+ delay: 30
+
+- set_fact:
+ docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+
+- meta: flush_handlers
diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json
new file mode 100644
index 000000000..a41b7cdbd
--- /dev/null
+++ b/roles/docker/templates/daemon.json
@@ -0,0 +1,20 @@
+{
+ "authorization-plugins": ["rhel-push-plugin"],
+ "default-runtime": "oci",
+ "containerd": "/run/containerd.sock",
+ "disable-legacy-registry": false,
+ "exec-opts": ["native.cgroupdriver=systemd"],
+ "insecure-registries": {{ l_docker_insecure_registries }},
+{% if docker_log_driver is defined %}
+ "log-driver": "{{ docker_log_driver }}",
+{%- endif %}
+ "log-opts": {{ l_docker_log_options }},
+ "runtimes": {
+ "oci": {
+ "path": "/usr/libexec/docker/docker-runc-current"
+ }
+ },
+ "selinux-enabled": {{ l_docker_selinux_enabled | lower }},
+ "add-registry": {{ l_docker_additional_registries }},
+ "block-registry": {{ l_docker_blocked_registries }}
+}
diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/docker/templates/systemcontainercustom.conf.j2
new file mode 100644
index 000000000..86eebfba6
--- /dev/null
+++ b/roles/docker/templates/systemcontainercustom.conf.j2
@@ -0,0 +1,17 @@
+# {{ ansible_managed }}
+
+[Service]
+{% if "http_proxy" in openshift.common %}
+Environment=HTTP_PROXY={{ docker_http_proxy }}
+{% endif -%}
+{% if "https_proxy" in openshift.common %}
+Environment=HTTPS_PROXY={{ docker_https_proxy }}
+{% endif -%}
+{% if "no_proxy" in openshift.common %}
+Environment=NO_PROXY={{ docker_no_proxy }}
+{% endif %}
+{%- if os_firewall_use_firewalld|default(false) %}
+[Unit]
+Wants=iptables.service
+After=iptables.service
+{%- endif %}
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
index 5237ed8f2..4e940b7f5 100644
--- a/roles/docker/vars/main.yml
+++ b/roles/docker/vars/main.yml
@@ -1,3 +1,5 @@
---
-udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
docker_systemd_dir: /etc/systemd/system/docker.service.d
+container_engine_systemd_dir: /etc/systemd/system/container-engine.service.d
+docker_conf_dir: /etc/docker/
+udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 29153f4df..c0d1d5946 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,10 +1,4 @@
---
-etcd_service: "{{ 'etcd' if openshift.common.is_etcd_system_container | bool or not etcd_is_containerized | bool else 'etcd_container' }}"
-etcd_client_port: 2379
-etcd_peer_port: 2380
-etcd_url_scheme: http
-etcd_peer_url_scheme: http
-
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
@@ -13,5 +7,4 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_data_dir: /var/lib/etcd/
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh
deleted file mode 100644
index 0e324a8a9..000000000
--- a/roles/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because
-# command flags are different between the two. Should work on stand
-# alone etcd hosts and master + etcd hosts too because we use the peer keys.
-etcdctl2() {
- /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@}
-}
-
-etcdctl3() {
- ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@}
-}
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index e0c70a181..689c07a84 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -24,3 +24,4 @@ dependencies:
- service: etcd peering
port: "{{ etcd_peer_port }}/tcp"
- role: etcd_server_certificates
+- role: etcd_common
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index c09da3b61..8c2f392ee 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -10,51 +10,52 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
-- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
+- include_role:
+ name: etcd_common
+ vars:
+ r_etcd_common_action: drop_etcdctl
when:
- - etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
-
-- name: Install etcd container service file
- template:
- dest: "/etc/systemd/system/etcd_container.service"
- src: etcd.docker.service
+ - openshift_etcd_etcdctl_profile | default(true) | bool
+
+- block:
+ - name: Pull etcd container
+ command: docker pull {{ openshift.etcd.etcd_image }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+ - name: Install etcd container service file
+ template:
+ dest: "/etc/systemd/system/etcd_container.service"
+ src: etcd.docker.service
when:
- etcd_is_containerized | bool
- not openshift.common.is_etcd_system_container | bool
-
# Start secondary etcd instance for third party integrations
# TODO: Determine an alternative to using thirdparty variable
-
-- name: Create configuration directory
- file:
- path: "{{ etcd_conf_dir }}"
- state: directory
- mode: 0700
- when: etcd_is_thirdparty | bool
+- block:
+ - name: Create configuration directory
+ file:
+ path: "{{ etcd_conf_dir }}"
+ state: directory
+ mode: 0700
# TODO: retest with symlink to confirm it does or does not function
-- name: Copy service file for etcd instance
- copy:
- src: /usr/lib/systemd/system/etcd.service
- dest: "/etc/systemd/system/{{ etcd_service }}.service"
- remote_src: True
- when: etcd_is_thirdparty | bool
-
-- name: Create third party etcd service.d directory exists
- file:
- path: "{{ etcd_systemd_dir }}"
- state: directory
- when: etcd_is_thirdparty | bool
-
-- name: Configure third part etcd service unit file
- template:
- dest: "{{ etcd_systemd_dir }}/custom.conf"
- src: custom.conf.j2
+ - name: Copy service file for etcd instance
+ copy:
+ src: /usr/lib/systemd/system/etcd.service
+ dest: "/etc/systemd/system/{{ etcd_service }}.service"
+ remote_src: True
+
+ - name: Ensure third party etcd service.d directory exists
+ file:
+ path: "{{ etcd_systemd_dir }}"
+ state: directory
+
+ - name: Configure third party etcd service unit file
+ template:
+ dest: "{{ etcd_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
when: etcd_is_thirdparty
# TODO: this task may not be needed with Validate permissions
@@ -80,28 +81,28 @@
command: systemctl daemon-reload
when: etcd_is_thirdparty | bool
-- name: Disable system etcd when containerized
- systemd:
- name: etcd
- state: stopped
- enabled: no
- masked: yes
- daemon_reload: yes
- when:
- - etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
- register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
-
-- name: Install etcd container service file
- template:
- dest: "/etc/systemd/system/etcd_container.service"
- src: etcd.docker.service
- when: etcd_is_containerized | bool and not openshift.common.is_etcd_system_container | bool
-
-- name: Install Etcd system container
- include: system_container.yml
- when: etcd_is_containerized | bool and openshift.common.is_etcd_system_container | bool
+- block:
+ - name: Disable system etcd when containerized
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: no
+ masked: yes
+ daemon_reload: yes
+ when: not openshift.common.is_etcd_system_container | bool
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+ - name: Install etcd container service file
+ template:
+ dest: "/etc/systemd/system/etcd_container.service"
+ src: etcd.docker.service
+ when: not openshift.common.is_etcd_system_container | bool
+
+ - name: Install Etcd system container
+ include: system_container.yml
+ when: openshift.common.is_etcd_system_container | bool
+ when: etcd_is_containerized | bool
- name: Validate permissions on the config dir
file:
@@ -126,9 +127,6 @@
enabled: yes
register: start_result
-- include: etcdctl.yml
- when: openshift_etcd_etcdctl_profile | default(true) | bool
-
- name: Set fact etcd_service_status_changed
set_fact:
etcd_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 72ffadbd2..e735bf50a 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,4 +1,7 @@
---
+- set_fact:
+ l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
@@ -15,6 +18,63 @@
{%- endif -%}
{% endfor -%}
+- name: Check etcd system container package
+ command: >
+ atomic containers list --no-trunc -a -f container=etcd -f backend=ostree
+ register: etcd_result
+
+- name: Unmask etcd service
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: no
+ masked: no
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ when: "'etcd' not in etcd_result.stdout"
+
+- name: Disable etcd_container
+ systemd:
+ name: etcd_container
+ state: stopped
+ enabled: no
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+- name: Remove etcd_container.service
+ file:
+ path: /etc/systemd/system/etcd_container.service
+ state: absent
+
+- name: Systemd reload configuration
+ systemd: name=etcd_container daemon_reload=yes
+
+- name: Check for previous etcd data store
+ stat:
+ path: "{{ l_etcd_src_data_dir }}/member/"
+ register: src_datastore
+
+- name: Check for etcd system container data store
+ stat:
+ path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
+ register: dest_datastore
+
+- name: Ensure that etcd system container data dirs exist
+ file: path="{{ item }}" state=directory
+ with_items:
+ - "{{ r_etcd_common_system_container_host_dir }}/etc"
+ - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
+
+- name: Copy etcd data store
+ command: >
+ cp -a {{ l_etcd_src_data_dir }}/member
+ {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
+ when:
+ - src_datastore.stat.exists
+ - not dest_datastore.stat.exists
+
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
@@ -35,3 +95,5 @@
- ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
+ - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 9151dd0bd..1b5598f46 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
#[logging]
-ETCD_DEBUG="{{ etcd_debug | default(false) | string }}"
+ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}"
{% if etcd_log_package_levels is defined %}
ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"
{% endif %}
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index ae059b549..adeca7a91 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -1,17 +1,17 @@
[Unit]
Description=The Etcd Server container
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
-EnvironmentFile=/etc/etcd/etcd.conf
+EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd_client_certificates/tasks/main.yml
index 450b65209..bbd29ece1 100644
--- a/roles/etcd_client_certificates/tasks/main.yml
+++ b/roles/etcd_client_certificates/tasks/main.yml
@@ -84,7 +84,6 @@
register: g_etcd_client_mktemp
changed_when: False
when: etcd_client_certs_missing | bool
- delegate_to: localhost
become: no
- name: Create a tarball of the etcd certs
@@ -133,8 +132,7 @@
when: etcd_client_certs_missing | bool
- name: Delete temporary directory
- file: name={{ g_etcd_client_mktemp.stdout }} state=absent
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
changed_when: False
when: etcd_client_certs_missing | bool
- delegate_to: localhost
become: no
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
index 131a01490..d1c3a6602 100644
--- a/roles/etcd_common/README.md
+++ b/roles/etcd_common/README.md
@@ -1,17 +1,21 @@
etcd_common
========================
-TODO
+Common resources for dependent etcd roles. E.g. default variables for:
+* config directories
+* certificates
+* ports
+* other settings
-Requirements
-------------
-
-TODO
+Or the `delegated_serial_command` Ansible module for executing a command on a remote node, e.g.:
-Role Variables
---------------
+```yaml
+- delegated_serial_command:
+ command: /usr/bin/make_database.sh arg1 arg2
+ creates: /path/to/database
+```
-TODO
+Or the drop_etcdctl.yml task file for installing `etcdctl` aliases on a node (see the example below).
Dependencies
------------
@@ -21,7 +25,22 @@ openshift-repos
Example Playbook
----------------
-TODO
+**Drop etcdctl aliases**
+
+```yaml
+- include_role:
+ name: etcd_common
+ tasks_from: drop_etcdctl
+```
+
+**Get access to common variables**
+
+```yaml
+# meta.yml of etcd
+...
+dependencies:
+- { role: etcd_common }
+```
License
-------
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index c5efb0a0c..b1bfa4592 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,6 +1,21 @@
---
+# Default action when calling this role
+r_etcd_common_action: noop
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
+# runc, docker, host
+r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
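+# Illustrative renderings of the line above (a sketch): with the default
+# r_etcd_common_etcd_runtime of "docker" this resolves to
+# "docker exec etcd_container etcdctl"; with "host" or embedded etcd it is just
+# "etcdctl", and with "runc" it is "runc exec etcd etcdctl".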
+
# etcd server vars
-etcd_conf_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_conf_dir: '/etc/etcd'
+r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -29,9 +44,22 @@ etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
etcd_ca_default_days: 1825
+r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
+r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
+r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
+
# etcd server & certificate vars
etcd_hostname: "{{ inventory_hostname }}"
etcd_ip: "{{ ansible_default_ipv4.address }}"
etcd_is_atomic: False
etcd_is_containerized: False
etcd_is_thirdparty: False
+
+# etcd dir vars
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+
+# etcd ports and protocols
+etcd_client_port: 2379
+etcd_peer_port: 2380
+etcd_url_scheme: http
+etcd_peer_url_scheme: http
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
new file mode 100644
index 000000000..2bc486d3f
--- /dev/null
+++ b/roles/etcd_common/tasks/backup.yml
@@ -0,0 +1,102 @@
+---
+# Set the etcd backup directory name here in case the tag or suffix contains a dynamic value that changes over time.
+# For example, openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} changes every second, so if the date changed
+# between setting the l_etcd_incontainer_backup_dir and l_etcd_backup_dir facts, the two backup directory names would differ.
+- set_fact:
+ l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+
+- set_fact:
+ l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
+
+- set_fact:
+ l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
+
+- set_fact:
+ l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
+
+- set_fact:
+ l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+
+# TODO: replace shell module with command and update later checks
+- name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1
+ register: l_avail_disk
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
+# TODO: replace shell module with command and update later checks
+- name: Check current etcd disk usage
+ shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
+ register: l_etcd_disk_usage
+ when: r_etcd_common_embedded_etcd | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
+- name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_avail_disk.stdout }} Kb available.
+ when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
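+# Illustrative check (a sketch): if the embedded etcd data dir uses 2400000 Kb
+# and only 1800000 Kb is available, 2400000 > 1800000 and the play aborts here.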
+
+# For non-containerized and non-embedded installs we should already have the
+# correct version of etcd installed, so don't do anything.
+#
+# For containerized installs we now exec into etcd_container.
+#
+# For embedded, non-containerized installs we need to ensure the latest version
+# of etcd is on the host.
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
+- name: Install latest etcd for embedded
+ package:
+ name: etcd
+ state: latest
+ when:
+ - r_etcd_common_embedded_etcd | bool
+ - not l_ostree_booted.stat.exists | bool
+
+- name: Check selinux label of '{{ l_etcd_data_dir }}'
+ command: >
+ stat -c '%C' {{ l_etcd_data_dir }}
+ register: l_etcd_selinux_labels
+
+- debug:
+ msg: "{{ l_etcd_selinux_labels }}"
+
+- name: Make sure the '{{ l_etcd_data_dir }}' has the proper label
+ command: >
+ chcon -t svirt_sandbox_file_t "{{ l_etcd_data_dir }}"
+ when:
+ - l_etcd_selinux_labels.rc == 0
+ - "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout"
+
+- name: Generate etcd backup
+ command: >
+ {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ l_etcd_incontainer_data_dir }}
+ --backup-dir={{ l_etcd_incontainer_backup_dir }}
+
+# According to the docs change referenced below, you can simply copy snap/db
+# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
+- name: Check for v3 data store
+ stat:
+ path: "{{ l_etcd_data_dir }}/member/snap/db"
+ register: l_v3_db
+
+- name: Copy etcd v3 data store
+ command: >
+ cp -a {{ l_etcd_data_dir }}/member/snap/db
+ {{ l_etcd_backup_dir }}/member/snap/
+ when: l_v3_db.stat.exists
+
+- set_fact:
+ r_etcd_common_backup_complete: True
+
+- name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ l_etcd_backup_dir }}"
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd_common/tasks/drop_etcdctl.yml
index 649ad23c1..6cb456677 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/drop_etcdctl.yml
@@ -4,9 +4,9 @@
when: not openshift.common.is_atomic | bool
- name: Configure etcd profile.d aliases
- copy:
- src: etcdctl.sh
- dest: /etc/profile.d/etcdctl.sh
+ template:
+ dest: "/etc/profile.d/etcdctl.sh"
+ src: etcdctl.sh.j2
mode: 0755
owner: root
group: root
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..6ed87e6c7
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Fail if invalid r_etcd_common_action provided
+ fail:
+ msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
+ when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
+
+- name: Include main action task file
+ include: "{{ r_etcd_common_action }}.yml"
+ when: r_etcd_common_action != "noop"
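+# Illustrative call (a sketch; the backup tag/suffix values are examples only):
+#
+#   - include_role:
+#       name: etcd_common
+#     vars:
+#       r_etcd_common_action: backup
+#       r_etcd_common_backup_tag: pre-migrate
+#       r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"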
diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd_common/templates/etcdctl.sh.j2
new file mode 100644
index 000000000..ac7d9c72f
--- /dev/null
+++ b/roles/etcd_common/templates/etcdctl.sh.j2
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because
+# command flags are different between the two. Should work on standalone
+# etcd hosts and on master + etcd hosts too because we use the peer keys.
+etcdctl2() {
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://`hostname`:2379 ${@}
+
+}
+
+etcdctl3() {
+ ETCDCTL_API=3 /usr/bin/etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints https://`hostname`:2379 ${@}
+}
diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml
new file mode 100644
index 000000000..00d697776
--- /dev/null
+++ b/roles/etcd_common/vars/main.yml
@@ -0,0 +1,4 @@
+---
+etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
+# Location of the service file is fixed and not meant to be changed
+etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md
new file mode 100644
index 000000000..369e78ff2
--- /dev/null
+++ b/roles/etcd_migrate/README.md
@@ -0,0 +1,53 @@
+etcd_migrate
+============
+
+Offline etcd migration of data from v2 to v3
+
+Requirements
+------------
+
+It is expected that no consumers of the etcd data access the data during the migration.
+Otherwise, the migrated data can be out of sync with the v2 data, resulting in an unhealthy etcd cluster.
+
+The role itself is responsible for:
+- checking etcd cluster health and raft status before the migration
+- checking for the presence of any v3 data (in that case the migration is stopped)
+- migrating v2 data to v3 data (including attaching leases to keys prefixed with the "/kubernetes.io/events" and "/kubernetes.io/masterleases" strings)
+- validating the migrated data (all v2 keys are present as v3 keys and are set to identical values)
+
+The migration itself requires an etcd member to be down during the process. Once the migration is done, the etcd member is started again.
+
+Role Variables
+--------------
+
+TBD
+
+Dependencies
+------------
+
+- etcd_common
+- lib_utils
+
+Example Playbook
+----------------
+
+```yaml
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tasks:
+ - include_role:
+ name: etcd_migrate
+ vars:
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+```
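+
+As a sketch (assuming this role's defaults in `defaults/main.yml` and the
+`oo_etcd_to_migrate` inventory group used by the check tasks), the pre-migration
+checks alone could be run with:
+
+```yaml
+- name: Check etcd cluster before migration
+  hosts: oo_etcd_to_migrate
+  gather_facts: no
+  tasks:
+  - include_role:
+      name: etcd_migrate
+    vars:
+      r_etcd_migrate_action: check
+      etcd_peer: "{{ ansible_default_ipv4.address }}"
+```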
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jan Chaloupka (jchaloup@redhat.com)
diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml
new file mode 100644
index 000000000..05cf41fbb
--- /dev/null
+++ b/roles/etcd_migrate/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Default action when calling this role, choices: check, migrate, configure
+r_etcd_migrate_action: migrate
diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml
new file mode 100644
index 000000000..f3cabbef6
--- /dev/null
+++ b/roles/etcd_migrate/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Jan Chaloupka
+ description: Etcd migration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: etcd_common }
+- { role: lib_utils }
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
new file mode 100644
index 000000000..b66696b55
--- /dev/null
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -0,0 +1,59 @@
+---
+- fail:
+ msg: "Currently etcd v3 migration is unsupported while we test it more thoroughly"
+ when: not openshift_enable_unsupported_configurations | default(false) | bool
+
+# Check the cluster is healthy
+- include: check_cluster_health.yml
+
+# Check if the member has v3 data already
+# Run the migration only if the data are v2
+- name: Check if there are any v3 data
+ command: >
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' get "" --from-key --keys-only -w json --limit 1
+ environment:
+ ETCDCTL_API: 3
+ register: l_etcdctl_output
+
+- fail:
+ msg: "Unable to get a number of v3 keys"
+ when: l_etcdctl_output.rc != 0
+
+- fail:
+ msg: "The etcd has at least one v3 key"
+ when: "'count' in (l_etcdctl_output.stdout | from_json) and (l_etcdctl_output.stdout | from_json).count != 0"
+
+
+# TODO(jchaloup): once the until loop can be used over include/block,
+# remove the repetitive code
+# - until loop not supported over include statement (nor block)
+# https://github.com/ansible/ansible/issues/17098
+# - with_items not supported over block
+
+# Check the cluster status for the first time
+- include: check_cluster_status.yml
+
+# Check the cluster status for the second time
+- block:
+ - debug:
+ msg: "l_etcd_cluster_status_ok: {{ l_etcd_cluster_status_ok }}"
+ - name: Wait a while before another check
+ pause:
+ seconds: 5
+ when: not l_etcd_cluster_status_ok | bool
+
+ - include: check_cluster_status.yml
+ when: not l_etcd_cluster_status_ok | bool
+
+
+# Check the cluster status for the third time
+- block:
+ - debug:
+ msg: "l_etcd_cluster_status_ok: {{ l_etcd_cluster_status_ok }}"
+ - name: Wait a while before another check
+ pause:
+ seconds: 5
+ when: not l_etcd_cluster_status_ok | bool
+
+ - include: check_cluster_status.yml
+ when: not l_etcd_cluster_status_ok | bool
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd_migrate/tasks/check_cluster_health.yml
new file mode 100644
index 000000000..201d83f99
--- /dev/null
+++ b/roles/etcd_migrate/tasks/check_cluster_health.yml
@@ -0,0 +1,23 @@
+---
+- name: Check cluster health
+ command: >
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: etcd_cluster_health
+ changed_when: false
+ failed_when: false
+
+- name: Assume a member is not healthy
+ set_fact:
+ etcd_member_healthy: false
+
+- name: Get member item health status
+ set_fact:
+ etcd_member_healthy: true
+ with_items: "{{ etcd_cluster_health.stdout_lines }}"
+ when: "(etcd_peer in item) and ('is healthy' in item)"
+
+- name: Check the etcd cluster health
+ # TODO(jchaloup): should we fail or ask the user whether to continue? Or just wait until the cluster is healthy?
+ fail:
+ msg: "Etcd member {{ etcd_peer }} is not healthy"
+ when: not etcd_member_healthy
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd_migrate/tasks/check_cluster_status.yml
new file mode 100644
index 000000000..b69fb5a52
--- /dev/null
+++ b/roles/etcd_migrate/tasks/check_cluster_status.yml
@@ -0,0 +1,32 @@
+---
+# etcd_ip originates from etcd_common role
+- name: Check cluster status
+ command: >
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' -w json endpoint status
+ environment:
+ ETCDCTL_API: 3
+ register: l_etcd_cluster_status
+
+- name: Retrieve raftIndex
+ set_fact:
+ etcd_member_raft_index: "{{ (l_etcd_cluster_status.stdout | from_json)[0]['Status']['raftIndex'] }}"
+
+- block:
+ # http://docs.ansible.com/ansible/playbooks_filters.html#extracting-values-from-containers
+ - name: Group all raftIndices into a list
+ set_fact:
+ etcd_members_raft_indices: "{{ groups['oo_etcd_to_migrate'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
+
+ - name: Compute the difference between the maximum and the minimum raftIndex
+ set_fact:
+ etcd_members_raft_indices_diff: "{{ ((etcd_members_raft_indices | max | int) - (etcd_members_raft_indices | min | int)) | int }}"
+
+ - debug:
+ msg: "Raft indices difference: {{ etcd_members_raft_indices_diff }}"
+
+ when: inventory_hostname in groups.oo_etcd_to_migrate[0]
+
+# The cluster raft status is ok if the difference of the max and min raft index is at most 1
+- name: capture the status
+ set_fact:
+ l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_migrate[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
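+# Illustrative example: raft indices of [1041, 1042, 1042] across the members
+# give a difference of 1, so l_etcd_cluster_status_ok is true; [1040, 1042, 1044]
+# gives a difference of 4 and the status check is retried by the caller.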
diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd_migrate/tasks/configure.yml
new file mode 100644
index 000000000..a305d5bf3
--- /dev/null
+++ b/roles/etcd_migrate/tasks/configure.yml
@@ -0,0 +1,13 @@
+---
+- name: Configure master to use etcd3 storage backend
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ value:
+ - etcd3
+ - key: kubernetesMasterConfig.apiServerArguments.storage-media-type
+ value:
+ - application/vnd.kubernetes.protobuf
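+# Illustrative result (a sketch) of the edit above in master-config.yaml:
+#
+#   kubernetesMasterConfig:
+#     apiServerArguments:
+#       storage-backend:
+#       - etcd3
+#       storage-media-type:
+#       - application/vnd.kubernetes.protobuf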
diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml
new file mode 100644
index 000000000..409b0b613
--- /dev/null
+++ b/roles/etcd_migrate/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Fail if invalid r_etcd_migrate_action provided
+ fail:
+ msg: "etcd_migrate role can only be called with 'check' or 'migrate' or 'configure'"
+ when: r_etcd_migrate_action not in ['check', 'migrate', 'configure']
+
+- name: Include main action task file
+ include: "{{ r_etcd_migrate_action }}.yml"
+
+# 2. migrate v2 datadir into v3:
+# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl
+# backup the etcd datadir first
+# Provide a way for an operator to specify transformer
+
+# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml
+# set storage-backend to "etcd3"
+# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master))
+
+# Run
+# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health
+# to check the cluster health (from the etcdctl.sh aliases file)
+
+# Another assumption:
+# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that at the Wednesday meeting)
+# -
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
new file mode 100644
index 000000000..b2cf6d20a
--- /dev/null
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -0,0 +1,64 @@
+---
+# Should this be run in a serial manner?
+- set_fact:
+ l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+
+- name: Disable etcd members
+ service:
+ name: "{{ l_etcd_service }}"
+ state: stopped
+
+# Should we skip all TTL keys? https://bugzilla.redhat.com/show_bug.cgi?id=1389773
+- name: Migrate etcd data
+ command: >
+ etcdctl migrate --data-dir={{ etcd_data_dir }}
+ environment:
+ ETCDCTL_API: 3
+ register: l_etcdctl_migrate
+
+# TODO(jchaloup): If any of the members fails, we need to restore all members to v2 from the pre-migrate backup
+- name: Check that the etcd v2 data was correctly migrated
+ fail:
+ msg: "Failed to migrate a member"
+ when: "'finished transforming keys' not in l_etcdctl_migrate.stdout and 'no v2 keys to migrate' not in l_etcdctl_migrate.stdout"
+
+- name: Migration message
+ debug:
+ msg: "Etcd migration finished with: {{ l_etcdctl_migrate.stdout }}"
+
+- name: Enable etcd member
+ service:
+ name: "{{ l_etcd_service }}"
+ state: started
+
+- name: Wait for cluster to become healthy after migration
+ command: >
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: l_etcd_migrate_health
+ until: l_etcd_migrate_health.rc == 0
+ retries: 3
+ delay: 30
+ run_once: true
+
+# NOTE: /usr/local/bin may be removed from the PATH by Ansible, which is why
+# it is added back to the environment in this task.
+- name: Re-introduce leases (as a replacement for key TTLs)
+ command: >
+ oadm migrate etcd-ttl \
+ --cert {{ r_etcd_common_master_peer_cert_file }} \
+ --key {{ r_etcd_common_master_peer_key_file }} \
+ --cacert {{ r_etcd_common_master_peer_ca_file }} \
+ --etcd-address 'https://{{ etcd_peer }}:{{ etcd_client_port }}' \
+ --ttl-keys-prefix {{ item }} \
+ --lease-duration 1h
+ environment:
+ ETCDCTL_API: 3
+ PATH: "/usr/local/bin:/var/usrlocal/bin:{{ ansible_env.PATH }}"
+ with_items:
+ - "/kubernetes.io/events"
+ - "/kubernetes.io/masterleases"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+ run_once: true
+
+- set_fact:
+ r_etcd_migrate_success: true
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
index 98c913dba..b453f2bd8 100644
--- a/roles/etcd_server_certificates/meta/main.yml
+++ b/roles/etcd_server_certificates/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: openshift_etcd_ca
+- role: etcd_ca
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 956f5cc55..4795188a6 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -5,11 +5,14 @@
- name: Check status of etcd certificates
stat:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ path: "{{ item }}"
with_items:
- - "{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -107,7 +110,6 @@
register: g_etcd_server_mktemp
changed_when: False
when: etcd_server_certs_missing | bool
- delegate_to: localhost
- name: Create a tarball of the etcd certs
command: >
@@ -133,8 +135,11 @@
- name: Ensure certificate directory exists
file:
- path: "{{ etcd_cert_config_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_cert_config_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -165,22 +170,34 @@
- name: Ensure ca directory exists
file:
- path: "{{ etcd_ca_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_ca_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive etcd ca cert tarballs
+- name: Unarchive cert tarball for the system container
+ unarchive:
+ src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_system_container_cert_config_dir }}"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
+
+- name: Unarchive etcd ca cert tarballs for the system container
unarchive:
src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- when: etcd_server_certs_missing | bool
+ dest: "{{ etcd_system_container_cert_config_dir }}/ca"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
- name: Delete temporary directory
- file: name={{ g_etcd_server_mktemp.stdout }} state=absent
+ local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
become: no
changed_when: False
when: etcd_server_certs_missing | bool
- delegate_to: localhost
- name: Validate permissions on certificate files
file:
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
new file mode 100644
index 000000000..61bbba225
--- /dev/null
+++ b/roles/etcd_upgrade/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+r_etcd_upgrade_action: upgrade
+r_etcd_upgrade_mechanism: rpm
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
new file mode 100644
index 000000000..afdb0267f
--- /dev/null
+++ b/roles/etcd_upgrade/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Jan Chaloupka
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: etcd_common
+ r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
new file mode 100644
index 000000000..129c69d6b
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+# INPUT r_etcd_upgrade_action
+- name: Fail if invalid etcd_upgrade_action provided
+ fail:
+ msg: "etcd_upgrade role can only be called with 'upgrade'"
+ when:
+ - r_etcd_upgrade_action not in ['upgrade']
+
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
+- include: "{{ r_etcd_upgrade_action }}.yml"
diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml
new file mode 100644
index 000000000..420c9638e
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/upgrade.yml
@@ -0,0 +1,11 @@
+---
+# INPUT r_etcd_upgrade_version
+# INPUT r_etcd_upgrade_mechanism
+- name: Fail if r_etcd_upgrade_mechanism is not set correctly during upgrade
+ fail:
+ msg: "r_etcd_upgrade_mechanism can only be set to 'rpm' or 'image'"
+ when:
+ - r_etcd_upgrade_mechanism not in ['rpm', 'image']
+
+- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd"
+ include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/roles/etcd_upgrade/tasks/upgrade_image.yml
index 5f8b59e17..136ec1142 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ b/roles/etcd_upgrade/tasks/upgrade_image.yml
@@ -1,27 +1,28 @@
---
+# INPUT r_etcd_upgrade_version
- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ command: "{{ etcdctlv2 }} cluster-health"
- name: Get current image
- shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
+ shell: "grep 'ExecStart=' {{ etcd_service_file }} | awk '{print $NF}'"
register: current_image
- name: Set new_etcd_image
set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ r_etcd_upgrade_version ) }}"
- name: Pull new etcd image
command: "docker pull {{ new_etcd_image }}"
- name: Update to latest etcd image
replace:
- dest: /etc/systemd/system/etcd_container.service
+ dest: "{{ etcd_service_file }}"
regexp: "{{ current_image.stdout }}$"
replace: "{{ new_etcd_image }}"
- name: Restart etcd_container
systemd:
- name: etcd_container
+ name: "{{ etcd_service }}"
daemon_reload: yes
state: restarted
@@ -30,16 +31,17 @@
## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
- name: Upgrade etcd for etcdctl when not atomic
package: name=etcd state=latest
- when: not openshift.common.is_atomic | bool
+ when: not l_ostree_booted.stat.exists | bool
- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ command: "{{ etcdctlv2 }} cluster-health"
register: etcdctl
until: etcdctl.rc == 0
retries: 3
delay: 10
- name: Store new etcd_image
+ # DEPENDENCY openshift_facts
openshift_facts:
role: etcd
local_facts:
diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd_upgrade/tasks/upgrade_rpm.yml
new file mode 100644
index 000000000..324b69605
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/upgrade_rpm.yml
@@ -0,0 +1,32 @@
+---
+# INPUT r_etcd_upgrade_version?
+
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+# RHEL 7.3.4 with etcd-3.1.3-1.el7
+# RHEL 7.3.3 with etcd-3.1.0-2.el7
+# RHEL 7.3.2 with etcd-3.0.15-1.el7
+
+- name: Verify cluster is healthy pre-upgrade
+ command: "{{ etcdctlv2 }} cluster-health"
+
+- set_fact:
+ l_etcd_target_package: "{{ 'etcd' if r_etcd_upgrade_version is not defined else 'etcd-'+r_etcd_upgrade_version+'*' }}"
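+# Illustrative values (a sketch): r_etcd_upgrade_version '3.1.3' yields the
+# package spec 'etcd-3.1.3*', while leaving it undefined upgrades the plain
+# 'etcd' package to the latest available version.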
+
+- name: Update etcd RPM to {{ l_etcd_target_package }}
+ package:
+ name: "{{ l_etcd_target_package }}"
+ state: latest
+
+- name: Restart etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "{{ etcdctlv2 }} cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml
new file mode 100644
index 000000000..5ed919d42
--- /dev/null
+++ b/roles/etcd_upgrade/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# EXPECTS etcd_peer
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
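+# Illustrative rendering (a sketch, using the default /etc/etcd cert paths and a
+# documentation IP for etcd_peer):
+#   etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key \
+#     --ca-file /etc/etcd/ca.crt -C https://192.0.2.10:2379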
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 94d1d18fb..02f5a5f64 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -5,4 +5,10 @@
- name: restart docker
become: yes
- systemd: name=docker state=restarted
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
+ register: l_docker_restart_docker_in_flannel_result
+ until: not l_docker_restart_docker_in_flannel_result | failed
+ retries: 3
+ delay: 30
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 2f6026fbf..1b73bfd0e 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -157,16 +157,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -900,6 +900,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -919,11 +926,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -941,7 +952,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -958,13 +969,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -984,9 +995,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1001,10 +1012,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1017,16 +1028,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1069,7 +1080,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1086,10 +1097,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1099,39 +1106,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1394,7 +1393,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1408,18 +1406,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1504,7 +1512,10 @@ class CAServerCert(OpenShiftCLI):
x509output, _ = proc.communicate()
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
- match = regex.search(x509output) # E501
+ match = regex.search(x509output.decode()) # E501
+ if not match:
+ return False
+
for entry in re.split(r", *", match.group(1)):
if entry.startswith('DNS') or entry.startswith('IP Address'):
cert_names.append(entry.split(':')[1])
@@ -1520,6 +1531,10 @@ class CAServerCert(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
+ # Filter non-strings from hostnames list s.t. the omit filter
+ # may be used to conditionally add a hostname.
+ params['hostnames'] = [host for host in params['hostnames'] if isinstance(host, string_types)]
+
config = CAServerCertConfig(params['kubeconfig'],
params['debug'],
{'cert': {'value': params['cert'], 'include': True},
@@ -1551,7 +1566,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
@@ -1569,6 +1584,10 @@ class CAServerCert(OpenShiftCLI):
# -*- -*- -*- Begin included fragment: ansible/oc_adm_ca_server_cert.py -*- -*- -*-
+
+# pylint: disable=wrong-import-position
+from ansible.module_utils.six import string_types
+
def main():
'''
ansible oc adm module for ca create-server-cert
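Editorial note: the run_ansible change in this file filters non-string entries out of params['hostnames'] so that Ansible's omit placeholder (or a None produced by a conditional) never reaches oc adm ca create-server-cert. The following is a minimal standalone sketch of that filtering step only; the string_types shim and the sample hostname values are illustrative, not part of the module.

# Sketch of the hostname filtering added to run_ansible(); sample data is hypothetical.
try:
    string_types = (str, unicode)  # Python 2, mirrors six.string_types
except NameError:
    string_types = (str,)          # Python 3

def filter_hostnames(hostnames):
    '''keep only real string hostnames, dropping omitted/None entries'''
    return [host for host in hostnames if isinstance(host, string_types)]

params = {'hostnames': ['docker-registry.default.svc', None, {'omitted': True}, '172.30.1.1']}
params['hostnames'] = filter_hostnames(params['hostnames'])
# -> ['docker-registry.default.svc', '172.30.1.1']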
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 5f49eef39..b09321a5b 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -143,16 +143,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -886,6 +886,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -905,11 +912,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
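Editorial note: the reworked _delete above accepts either a name or a selector and refuses to run with neither, instead of always requiring a positional name. A small hedged sketch of the same argument-building rule, with a stub error class and invented resource names:

# Sketch of the name-or-selector rule enforced by _delete(); the error class
# stand-in and example values are hypothetical.
class OpenShiftCLIError(Exception):
    '''placeholder for the module's error class'''

def build_delete_cmd(resource, name=None, selector=None):
    cmd = ['delete', resource]
    if selector is not None:
        cmd.append('--selector={}'.format(selector))
    elif name is not None:
        cmd.append(name)
    else:
        raise OpenShiftCLIError('Either name or selector is required when calling delete.')
    return cmd

print(build_delete_cmd('pod', selector='app=registry'))  # ['delete', 'pod', '--selector=app=registry']
print(build_delete_cmd('dc', name='docker-registry'))    # ['delete', 'dc', 'docker-registry']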
@@ -927,7 +938,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -944,13 +955,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -970,9 +981,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -987,10 +998,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1003,16 +1014,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1055,7 +1066,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1072,10 +1083,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1085,39 +1092,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1380,7 +1379,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1394,18 +1392,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1457,7 +1465,7 @@ class ManageNode(OpenShiftCLI):
if selector:
_sel = selector
- results = self._get('node', rname=_node, selector=_sel)
+ results = self._get('node', name=_node, selector=_sel)
if results['returncode'] != 0:
return results
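Editorial note: the openshift_cmd refactor repeated in each generated module always returns 'returncode' and 'cmd', seeds 'results' with an empty dict for JSON output, and only attaches 'stdout'/'stderr' when decoding failed or the command returned non-zero. The sketch below reproduces just that result-shaping logic, with the subprocess call replaced by canned returncode/stdout/stderr values so it runs standalone; it is an approximation, not the module code.

import json

def shape_result(returncode, stdout, stderr, cmds, output=False, output_type='json'):
    '''approximate the result dict produced by the reworked openshift_cmd()'''
    rval = {'returncode': returncode, 'cmd': ' '.join(cmds)}
    if output_type == 'json':
        rval['results'] = {}
        if output and stdout:
            try:
                rval['results'] = json.loads(stdout)
            except ValueError as verr:
                # same message check as the module (Python 2 json wording)
                if "No JSON object could be decoded" in verr.args:
                    rval['err'] = verr.args
    elif output_type == 'raw':
        rval['results'] = stdout if output else ''
    if 'err' in rval or returncode != 0:
        rval.update({'stderr': stderr, 'stdout': stdout})
    return rval

# Successful JSON call: no stdout/stderr keys are attached.
print(shape_result(0, '{"items": []}', '', ['oc', 'get', 'nodes'], output=True))
# Failed call: stderr/stdout are preserved for the caller.
print(shape_result(1, '', 'error: not found', ['oc', 'get', 'nodes'], output=True))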
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 7caba04f5..221ef5094 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -129,16 +129,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -891,11 +898,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -913,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -930,13 +941,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -956,9 +967,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -973,10 +984,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -989,16 +1000,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1041,7 +1052,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1058,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1071,39 +1078,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1366,7 +1365,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1380,18 +1378,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
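Editorial note: to_option_list() and stringify() gain an ascommalist parameter so that a dict-valued option (such as labels) is rendered as a single --key=k1=v1,k2=v2 argument instead of the dict's repr. A standalone sketch of that formatting step follows; the option names and values are invented for the example.

# Sketch of the ascommalist handling added to stringify(); example data only.
def stringify(config_options, ascommalist=''):
    rval = []
    for key in sorted(config_options.keys()):
        data = config_options[key]
        if data['include'] and (data['value'] or isinstance(data['value'], int)):
            if key == ascommalist:
                val = ','.join(['{}={}'.format(kk, vv)
                                for kk, vv in sorted(data['value'].items())])
            else:
                val = data['value']
            rval.append('--{}={}'.format(key.replace('_', '-'), val))
    return rval

opts = {'labels': {'value': {'region': 'infra', 'router': 'router'}, 'include': True},
        'replicas': {'value': 2, 'include': True}}
print(stringify(opts, ascommalist='labels'))
# ['--labels=region=infra,router=router', '--replicas=2']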
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index aac3f7166..071562875 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -129,16 +129,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -891,11 +898,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -913,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -930,13 +941,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -956,9 +967,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -973,10 +984,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -989,16 +1000,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1041,7 +1052,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1058,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1071,39 +1078,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1366,7 +1365,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1380,18 +1378,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1956,7 +1964,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
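Editorial note: the change above switches the lookup from clusterpolicybindings to namespaced policybindings and then indexes into results['results'][0]['items'][0]. A brief hedged sketch of how that result is consumed; the canned payload stands in for real oc output and is purely illustrative.

# Hedged sketch of the policybindings lookup; canned data, not real oc output.
class OpenShiftCLIError(Exception):
    '''placeholder for the module's error class'''

def first_policy_binding(results):
    '''mirror how PolicyUser.policybindings consumes the _get() result'''
    if results['returncode'] != 0:
        raise OpenShiftCLIError('Could not retrieve policybindings')
    return results['results'][0]['items'][0]

fake = {'returncode': 0,
        'results': [{'items': [{'metadata': {'name': ':default'}, 'roleBindings': []}]}]}
print(first_policy_binding(fake)['metadata']['name'])  # ':default'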
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index b0345b026..bf2650460 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -247,16 +247,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -990,6 +990,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1009,11 +1016,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -1031,7 +1042,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1048,13 +1059,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1074,9 +1085,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1091,10 +1102,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1107,16 +1118,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1159,7 +1170,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1176,10 +1187,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1189,39 +1196,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1484,7 +1483,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1498,18 +1496,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1878,10 +1886,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -1892,6 +1902,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -1984,7 +1995,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1995,6 +2007,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -2007,8 +2020,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -2030,6 +2044,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -2038,6 +2056,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -2099,6 +2118,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
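Editorial note: the new Service helpers in the fragment above manage spec.externalIPs through the Yedit-backed get/put calls. The following is a simplified stand-in that operates on a plain dict instead of a Yedit document, intended only to make the add/find/delete semantics easy to follow.

# Simplified stand-in for the Service external IP helpers; plain dict, not Yedit.
def get_external_ips(svc):
    return svc.setdefault('spec', {}).get('externalIPs') or []

def add_external_ips(svc, inc):
    if not isinstance(inc, list):
        inc = [inc]
    ips = get_external_ips(svc)
    if not ips:
        svc['spec']['externalIPs'] = inc
    else:
        ips.extend(inc)
    return True

def delete_external_ips(svc, inc):
    if not isinstance(inc, list):
        inc = [inc]
    ips = get_external_ips(svc)
    removed = False
    for ip in inc:
        if ip in ips:
            ips.remove(ip)
            removed = True
    return removed

svc = {'kind': 'Service', 'spec': {}}
add_external_ips(svc, '192.168.1.10')
add_external_ips(svc, ['192.168.1.11'])
delete_external_ips(svc, '192.168.1.10')
print(svc['spec']['externalIPs'])  # ['192.168.1.11']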
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
@@ -2301,7 +2367,7 @@ class Registry(OpenShiftCLI):
rval = 0
for part in self.registry_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
@@ -2339,7 +2405,7 @@ class Registry(OpenShiftCLI):
def prepare_registry(self):
''' prepare a registry for instantiation '''
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
@@ -2527,25 +2593,34 @@ class Registry(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
+ registry_options = {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ }
+
+ # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+ # by old versions of oc.
+ # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
+ # understand these parameters.
+ if params['daemonset']:
+ registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+ if params['enforce_quota']:
+ registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
+
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
- {'images': {'value': params['images'], 'include': True},
- 'latest_images': {'value': params['latest_images'], 'include': True},
- 'labels': {'value': params['labels'], 'include': True},
- 'ports': {'value': ','.join(params['ports']), 'include': True},
- 'replicas': {'value': params['replicas'], 'include': True},
- 'selector': {'value': params['selector'], 'include': True},
- 'service_account': {'value': params['service_account'], 'include': True},
- 'mount_host': {'value': params['mount_host'], 'include': True},
- 'env_vars': {'value': params['env_vars'], 'include': False},
- 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
- 'edits': {'value': params['edits'], 'include': False},
- 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
- 'daemonset': {'value': params['daemonset'], 'include': True},
- 'tls_key': {'value': params['tls_key'], 'include': True},
- 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
- })
+ registry_options)
ocregistry = Registry(rconfig, params['debug'])
@@ -2636,7 +2711,7 @@ def main():
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
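Editorial note: run_ansible for the registry now assembles registry_options up front, adds daemonset and enforce_quota only when they are truthy (older oc releases reject those flags, and false is already the oc default), and labels becomes a dict rendered via to_option_list(ascommalist='labels'). A small sketch of the conditional-inclusion step with made-up params:

# Sketch of the conditional daemonset/enforce-quota handling; hypothetical input.
def build_registry_options(params):
    options = {'images': {'value': params['images'], 'include': True},
               'labels': {'value': params['labels'], 'include': True},
               'replicas': {'value': params['replicas'], 'include': True}}
    # Only pass flags that old oc clients do not understand when requested.
    if params['daemonset']:
        options['daemonset'] = {'value': params['daemonset'], 'include': True}
    if params['enforce_quota']:
        options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
    return options

params = {'images': None, 'labels': {'region': 'infra'}, 'replicas': 1,
          'daemonset': False, 'enforce_quota': True}
print(sorted(build_registry_options(params)))
# ['enforce_quota', 'images', 'labels', 'replicas']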
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 307269da4..a2b7d12c0 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -272,16 +272,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1015,6 +1015,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1034,11 +1041,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -1056,7 +1067,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1073,13 +1084,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1099,9 +1110,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1116,10 +1127,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1132,16 +1143,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1184,7 +1195,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1201,10 +1212,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1214,39 +1221,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1509,7 +1508,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1523,18 +1521,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1557,7 +1565,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1568,6 +1577,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -1580,8 +1590,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -1603,6 +1614,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -1611,6 +1626,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -1672,6 +1688,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
@@ -2167,10 +2230,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -2181,6 +2246,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -2685,7 +2751,7 @@ class Router(OpenShiftCLI):
self.secret = None
self.rolebinding = None
for part in self.router_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
@@ -2771,7 +2837,7 @@ class Router(OpenShiftCLI):
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
@@ -3072,7 +3138,7 @@ def main():
key_file=dict(default=None, type='str'),
images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
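Editorial note: in both the registry and router diffs, labels changes from type='list' to type='dict' and ServiceConfig.create_dict() nests user labels under metadata.labels instead of writing them directly into metadata. A minimal sketch of the corrected construction; the label values are examples only.

# Sketch of the metadata.labels fix; example labels, not module code.
def build_metadata(name, namespace, labels=None):
    metadata = {'name': name, 'namespace': namespace}
    if labels:
        # Previously each label landed directly in metadata, producing invalid
        # objects; now they are nested under metadata['labels'].
        metadata['labels'] = dict(labels)
    return metadata

print(build_metadata('router', 'default', {'router': 'router', 'region': 'infra'}))
# {'name': 'router', 'namespace': 'default', 'labels': {'router': 'router', 'region': 'infra'}}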
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index d2620b4cc..955c6313e 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -65,15 +65,20 @@ options:
# -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*-
-# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name,no-name-in-module, import-error
import json
+
+from distutils.version import StrictVersion
+
from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
- args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ # NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
+ args = ['atomic', 'install', '--system', '--system-package=no',
+ '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
@@ -157,7 +162,9 @@ def core(module):
module.fail_json(rc=rc, msg=err)
return
- containers = json.loads(out)
+ # NOTE: "or '[]' is a workaround until atomic containers list --json
+ # provides an empty list when no containers are present.
+ containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
@@ -187,9 +194,15 @@ def main():
)
# Verify that the platform supports atomic command
- rc, _, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
+ # This module requires atomic version 1.17.2 or later
+ atomic_version = StrictVersion(version_out.replace('\n', ''))
+ if atomic_version < StrictVersion('1.17.2'):
+ module.fail_json(
+ msg="atomic version 1.17.2+ is required",
+ err=str(atomic_version))
try:
core(module)
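Editorial note: oc_atomic_container now reads the installed atomic RPM version and refuses to run below 1.17.2, and treats empty output from `atomic containers list --json` as an empty list. A standalone sketch of both guards, with the run_command call replaced by canned strings so it runs anywhere; StrictVersion comes from distutils to mirror the module, though distutils is deprecated on recent Pythons.

# Standalone sketch of the version and empty-output guards; canned strings only.
import json
from distutils.version import StrictVersion

def check_atomic_version(version_out, minimum='1.17.2'):
    '''raise if the installed atomic is older than the supported minimum'''
    atomic_version = StrictVersion(version_out.replace('\n', ''))
    if atomic_version < StrictVersion(minimum):
        raise RuntimeError('atomic version {}+ is required'.format(minimum))
    return atomic_version

def parse_containers(out):
    '''treat empty `atomic containers list --json` output as no containers'''
    return json.loads(out or '[]')

print(check_atomic_version('1.17.2\n'))  # 1.17.2
print(parse_containers(''))              # []
print(parse_containers('[{"image_name": "registry.example/ose-node"}]')[0]['image_name'])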
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 308a7d806..289f08b83 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -121,16 +121,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -864,6 +864,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -883,11 +890,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -905,7 +916,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -922,13 +933,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -948,9 +959,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -965,10 +976,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -981,16 +992,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1033,7 +1044,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1050,10 +1061,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1063,39 +1070,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1358,7 +1357,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1372,18 +1370,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1527,10 +1535,10 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
@@ -1629,7 +1637,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -1665,6 +1673,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -1734,6 +1743,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
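Note the recurring change to `_delete()` in the shared OpenShiftCLI fragment: the positional `rname` argument becomes an optional `name`, a selector takes precedence, and an `OpenShiftCLIError` is raised when neither is supplied, so `oc delete <kind>` can no longer be emitted without a target. A minimal sketch of the resulting command construction (using `ValueError` in place of the module's `OpenShiftCLIError` so the snippet stands alone):

    def build_delete_cmd(resource, name=None, selector=None):
        # Mirrors the new _delete() logic: selector wins, then name, otherwise error out.
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise ValueError('Either name or selector is required when calling delete.')
        return cmd

    # build_delete_cmd('clusterrole', name='self-provisioner')
    #   -> ['delete', 'clusterrole', 'self-provisioner']
    # build_delete_cmd('pod', selector='app=router')
    #   -> ['delete', 'pod', '--selector=app=router']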
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 96345ffe0..7cd29215f 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -127,16 +127,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -870,6 +870,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -889,11 +896,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -911,7 +922,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -928,13 +939,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -954,9 +965,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -971,10 +982,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -987,16 +998,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1039,7 +1050,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1056,10 +1067,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1069,39 +1076,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1364,7 +1363,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1378,18 +1376,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1524,6 +1532,10 @@ class OCConfigMap(OpenShiftCLI):
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
+ if not params['name']:
+ return {'failed': True,
+ 'msg': 'Please specify a name when state is absent|present.'}
+
########
# Delete
########
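The reworked tail of `openshift_cmd()` (repeated in every generated module) always populates `results`, parses JSON only when output was requested and stdout is non-empty, and attaches `stderr`/`stdout` whenever the command failed or decoding set an `err` key. A condensed, slightly simplified sketch of that post-processing (here any JSON decode failure is recorded, not only the "No JSON object could be decoded" case):

    import json

    def summarize(cmds, returncode, stdout, stderr, output=True, output_type='json'):
        # Condensed version of the new result handling in openshift_cmd().
        rval = {'returncode': returncode, 'cmd': ' '.join(cmds)}
        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''
        if 'err' in rval or returncode != 0:
            rval.update({'stderr': stderr, 'stdout': stdout})
        return rval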
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 99027c07f..5b11f45ba 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -171,16 +171,16 @@ oc_edit:
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -933,11 +940,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -955,7 +966,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -972,13 +983,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -998,9 +1009,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1015,10 +1026,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1031,16 +1042,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1083,7 +1094,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1100,10 +1111,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1113,39 +1120,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1408,7 +1407,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1422,18 +1420,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
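`to_option_list()` and `stringify()` on OpenShiftCLIConfig now accept an optional `ascommalist` key: when the named option's value is a dict, it is flattened into sorted, comma-delimited `k=v` pairs instead of being rendered with `str()`. A small sketch of that branch, using a hypothetical option name:

    def flatten_option(key, value, ascommalist=''):
        # Mirrors the new stringify() behaviour for dict-valued options.
        if key == ascommalist and isinstance(value, dict):
            value = ','.join('{}={}'.format(k, v) for k, v in sorted(value.items()))
        return '--{}={}'.format(key.replace('_', '-'), value)

    # flatten_option('node_selector', {'region': 'infra', 'zone': 'default'},
    #                ascommalist='node_selector')
    #   -> '--node-selector=region=infra,zone=default'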
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 34f86a478..d3834ce0c 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -138,16 +138,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -881,6 +881,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -900,11 +907,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -922,7 +933,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -939,13 +950,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -965,9 +976,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -982,10 +993,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -998,16 +1009,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1050,7 +1061,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1067,10 +1078,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1080,39 +1087,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1375,7 +1374,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1389,18 +1387,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
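`_replace()` now strips `metadata.resourceVersion` from the file before calling `oc replace`, avoiding conflicts when the copy on disk carries a stale version. The modules do this with the bundled Yedit class; the same idea with plain PyYAML, as a sketch assuming the file holds a single YAML document:

    import yaml

    def drop_resource_version(fname):
        # Remove metadata.resourceVersion so `oc replace -f` does not trip over a stale version.
        with open(fname) as fd:
            obj = yaml.safe_load(fd)
        if obj and obj.get('metadata', {}).pop('resourceVersion', None) is not None:
            with open(fname, 'w') as fd:
                yaml.safe_dump(obj, fd, default_flow_style=False)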
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 00d67108d..0d751fe28 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -111,16 +111,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -854,6 +854,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -873,11 +880,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -895,7 +906,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -912,13 +923,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -938,9 +949,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -955,10 +966,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -971,16 +982,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1023,7 +1034,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1040,10 +1051,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1053,39 +1060,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1348,7 +1347,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1362,18 +1360,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
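`_run()` now decodes subprocess output explicitly as UTF-8 instead of relying on the default codec, which under Python 2 would be ASCII. A minimal sketch of that call site:

    import subprocess

    def run(cmds, input_data=None):
        # Decode explicitly as UTF-8; the implicit default differs between Python 2 and 3.
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate(input_data)
        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')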
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index ee918a2d1..3a6ba3e56 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -130,16 +130,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -873,6 +873,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -892,11 +899,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -914,7 +925,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -931,13 +942,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -957,9 +968,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -974,10 +985,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -990,16 +1001,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1042,7 +1053,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1059,10 +1070,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1072,39 +1079,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1367,7 +1366,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1381,18 +1379,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
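When rendering template parameters for `oc process`, values are now passed through `str()` with embedded single quotes swapped for double quotes, so dict or list values print in a JSON-friendly form rather than raw Python repr. A small sketch with hypothetical parameters:

    def render_params(params):
        # Mirrors the new parameter rendering used with `oc process -v`.
        return ['{}={}'.format(key, str(value).replace("'", r'"'))
                for key, value in params.items()]

    # render_params({'REPLICAS': 3, 'LABELS': {'app': 'router'}})
    #   -> ['REPLICAS=3', 'LABELS={"app": "router"}']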
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 62b6049c4..5db036b23 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -147,16 +147,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -890,6 +890,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -909,11 +916,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -931,7 +942,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -948,13 +959,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -974,9 +985,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -991,10 +1002,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1007,16 +1018,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1059,7 +1070,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1076,10 +1087,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1089,39 +1096,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1384,7 +1383,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1398,18 +1396,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1551,9 +1559,9 @@ class OCLabel(OpenShiftCLI):
label_list = []
if self.name:
- result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
+ result = self._get(resource=self.kind, name=self.name, selector=self.selector)
- if 'labels' in result['results'][0]['metadata']:
+ if result['results'][0] and 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
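In oc_label the lookup now calls the renamed `_get(..., name=...)` and checks that the first result is non-empty before reaching into `['metadata']['labels']`, so a missing object yields an empty label set rather than an exception. A guarded-extraction sketch (a hypothetical helper, not the module's own method):

    def current_labels(result):
        # Guarded label extraction: an empty or missing result maps to {}.
        results = result.get('results') or [{}]
        first = results[0] or {}
        if 'labels' in first.get('metadata', {}):
            return first['metadata']['labels']
        return {}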
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 075c286e0..9b0c0e0e4 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -90,15 +90,15 @@ options:
required: false
default: str
aliases: []
- all_namespace:
+ all_namespaces:
description:
- - The namespace where the object lives.
+ - Search in all namespaces for the object.
required: false
default: false
aliases: []
kind:
description:
- - The kind attribute of the object. e.g. dc, bc, svc, route
+ - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
required: True
default: None
aliases: []
@@ -150,16 +150,16 @@ register: router_output
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -893,6 +893,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -912,11 +919,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -934,7 +945,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -951,13 +962,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -977,9 +988,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -994,10 +1005,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1010,16 +1021,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1062,7 +1073,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1079,10 +1090,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1092,39 +1099,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
- else:
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1387,7 +1386,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1401,18 +1399,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1430,7 +1438,7 @@ class OCObject(OpenShiftCLI):
def __init__(self,
kind,
namespace,
- rname=None,
+ name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
@@ -1439,21 +1447,26 @@ class OCObject(OpenShiftCLI):
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.name = rname
+ self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
- results = self._get(self.kind, rname=self.name, selector=self.selector)
- if results['returncode'] != 0 and 'stderr' in results and \
- '\"%s\" not found' % self.name in results['stderr']:
+ results = self._get(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
- '''return all pods '''
- return self._delete(self.kind, self.name)
+ '''delete the object'''
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -1529,24 +1542,31 @@ class OCObject(OpenShiftCLI):
# Get
#####
if state == 'list':
- return {'changed': False, 'results': api_rval, 'state': 'list'}
-
- if not params['name']:
- return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+ return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
- if not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': 'absent'}
+ # verify it's not in our results
+ if (params['name'] is not None or params['selector'] is not None) and \
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
+ return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
- return {'changed': True, 'results': api_rval, 'state': 'absent'}
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ # create/update: Must define a name beyond this point
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
@@ -1572,7 +1592,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
@@ -1587,7 +1607,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+ return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
@@ -1606,7 +1626,7 @@ class OCObject(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
# -*- -*- -*- End included fragment: class/oc_obj.py -*- -*- -*-
@@ -1634,7 +1654,7 @@ def main():
force=dict(default=False, type='bool'),
selector=dict(default=None, type='str'),
),
- mutually_exclusive=[["content", "files"]],
+ mutually_exclusive=[["content", "files"], ["selector", "name"]],
supports_check_mode=True,
)
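The oc_obj changes above make `name` and `selector` mutually exclusive and turn a call with neither into an error. A minimal sketch of that command assembly, not part of the patch (build_delete_cmd and ValueError stand in for the module's _delete and OpenShiftCLIError):

def build_delete_cmd(resource, name=None, selector=None):
    # Selector wins, a bare name is the fallback, and omitting both is
    # now an error instead of a malformed `oc delete` invocation.
    cmd = ['delete', resource]
    if selector is not None:
        cmd.append('--selector={}'.format(selector))
    elif name is not None:
        cmd.append(name)
    else:
        raise ValueError('Either name or selector is required when calling delete.')
    return cmd

print(build_delete_cmd('pods', selector='app=router'))
# ['delete', 'pods', '--selector=app=router']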
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index d65e1d4c9..130521761 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -82,16 +82,16 @@ oc_objectvalidator:
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -825,6 +825,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -844,11 +851,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -866,7 +877,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -883,13 +894,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -909,9 +920,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -926,10 +937,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -942,16 +953,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -994,7 +1005,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1011,10 +1022,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1024,39 +1031,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1319,7 +1318,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1333,18 +1331,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1387,8 +1395,10 @@ class OCObjectValidator(OpenShiftCLI):
# check if it uses a reserved name
name = namespace['metadata']['name']
if not any((name == 'kube',
+ name == 'kubernetes',
name == 'openshift',
name.startswith('kube-'),
+ name.startswith('kubernetes-'),
name.startswith('openshift-'),)):
return False
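The oc_objectvalidator hunk above widens the reserved-namespace check. A small sketch of the extended predicate, not part of the patch (uses_reserved_name is an assumed helper name):

def uses_reserved_name(name):
    # Same tuple of checks the validator applies, now including the
    # 'kubernetes' and 'kubernetes-' entries added by the patch.
    return any((name == 'kube',
                name == 'kubernetes',
                name == 'openshift',
                name.startswith('kube-'),
                name.startswith('kubernetes-'),
                name.startswith('openshift-')))

print(uses_reserved_name('kubernetes-dashboard'))  # True
print(uses_reserved_name('myproject'))             # False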
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index d487746eb..c6568d520 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -139,16 +139,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -882,6 +882,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -901,11 +908,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -923,7 +934,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -940,13 +951,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -966,9 +977,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -983,10 +994,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -999,16 +1010,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1051,7 +1062,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1068,10 +1079,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1081,39 +1088,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1376,7 +1375,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1390,18 +1388,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1439,7 +1447,7 @@ class OCProcess(OpenShiftCLI):
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
self._template = results['results']['items']
return self._template
@@ -1545,7 +1553,7 @@ class OCProcess(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {"failed": True, "msg" : api_rval}
- return {"changed" : False, "results": api_rval, "state": "list"}
+ return {"changed" : False, "results": api_rval, "state": state}
elif state == 'present':
if check_mode and params['create']:
@@ -1567,9 +1575,9 @@ class OCProcess(OpenShiftCLI):
return {"failed": True, "msg": api_rval}
if params['create']:
- return {"changed": True, "results": api_rval, "state": "present"}
+ return {"changed": True, "results": api_rval, "state": state}
- return {"changed": False, "results": api_rval, "state": "present"}
+ return {"changed": False, "results": api_rval, "state": state}
# verify results
update = False
@@ -1584,13 +1592,13 @@ class OCProcess(OpenShiftCLI):
update = True
if not update:
- return {"changed": update, "results": api_rval, "state": "present"}
+ return {"changed": update, "results": api_rval, "state": state}
for cmd in rval:
if cmd['returncode'] != 0:
- return {"failed": True, "changed": update, "results": rval, "state": "present"}
+ return {"failed": True, "changed": update, "msg": rval, "state": state}
- return {"changed": update, "results": rval, "state": "present"}
+ return {"changed": update, "results": rval, "state": state}
# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*-
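The `_process` hunk repeated above changes how template parameters are rendered for `oc process -v`: values are stringified and single quotes rewritten as double quotes, presumably so JSON-like values survive the round trip. A sketch of that expression in isolation, not part of the patch (format_template_params and the OPTIONS parameter are illustrative assumptions):

def format_template_params(params):
    # Mirrors the new param_str expression in _process().
    return ['{}={}'.format(key, str(value).replace("'", r'"'))
            for key, value in params.items()]

print(format_template_params({'TAG': 'v3.6', 'OPTIONS': "{'replicas': 2}"}))
# ['TAG=v3.6', 'OPTIONS={"replicas": 2}']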
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 3fddce055..a78bc06d2 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -136,16 +136,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -879,6 +879,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -898,11 +905,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -920,7 +931,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -937,13 +948,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -963,9 +974,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -980,10 +991,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -996,16 +1007,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1048,7 +1059,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1065,10 +1076,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1078,39 +1085,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1373,7 +1372,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1387,18 +1385,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
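The stringify/to_option_list change repeated in each module formats a dict-valued option as comma-delimited key=value pairs when its key is named in ascommalist. A standalone sketch, not part of the patch (the node_selector option shown is a hypothetical example):

def stringify(config_options, ascommalist=''):
    # Same option-to-CLI-flag conversion the patch adds, with the
    # comma-list special case for the key named in ascommalist.
    rval = []
    for key in sorted(config_options.keys()):
        data = config_options[key]
        if data['include'] and (data['value'] or isinstance(data['value'], int)):
            if key == ascommalist:
                val = ','.join('{}={}'.format(k, v)
                               for k, v in sorted(data['value'].items()))
            else:
                val = data['value']
            rval.append('--{}={}'.format(key.replace('_', '-'), val))
    return rval

opts = {'node_selector': {'include': True,
                          'value': {'region': 'infra', 'zone': 'default'}}}
print(stringify(opts, ascommalist='node_selector'))
# ['--node-selector=region=infra,zone=default']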
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index d63f6e063..a88639bfc 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -131,16 +131,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -874,6 +874,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -893,11 +900,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -915,7 +926,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -932,13 +943,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -958,9 +969,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -975,10 +986,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -991,16 +1002,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1043,7 +1054,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1060,10 +1071,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1073,39 +1080,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1368,7 +1367,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1382,18 +1380,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
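The `_replace` hunk repeated above strips metadata.resourceVersion before calling `oc replace`, to sidestep a race when the object changed since it was fetched. A sketch of the same guard using plain PyYAML instead of the repository's Yedit helper, not part of the patch:

import yaml

def strip_resource_version(fname):
    # Drop metadata.resourceVersion and rewrite the file only if it was present,
    # matching the conditional write the patch performs via Yedit.
    with open(fname) as handle:
        obj = yaml.safe_load(handle)
    if obj.get('metadata', {}).pop('resourceVersion', None) is not None:
        with open(fname, 'w') as handle:
            yaml.safe_dump(obj, handle, default_flow_style=False)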
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index daddec69f..0c0bc9386 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -181,16 +181,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -924,6 +924,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -943,11 +950,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -965,7 +976,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -982,13 +993,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1008,9 +1019,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1025,10 +1036,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1041,16 +1052,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1093,7 +1104,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1110,10 +1121,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1123,39 +1130,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1418,7 +1417,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1432,18 +1430,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
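The openshift_cmd rewrite repeated above always seeds `results` for JSON output and only attaches stdout/stderr when the command failed or the JSON could not be decoded. A condensed sketch of that result shaping, not part of the patch (build_rval is an assumed name):

import json

def build_rval(returncode, stdout, stderr, cmds, output=True, output_type='json'):
    rval = {'returncode': returncode, 'cmd': ' '.join(cmds)}
    if output_type == 'json':
        rval['results'] = {}
        if output and stdout:
            try:
                rval['results'] = json.loads(stdout)
            except ValueError as verr:
                # Same membership test on verr.args that the patch uses.
                if 'No JSON object could be decoded' in verr.args:
                    rval['err'] = verr.args
    elif output_type == 'raw':
        rval['results'] = stdout if output else ''
    if 'err' in rval or returncode != 0:
        rval.update({'stderr': stderr, 'stdout': stdout})
    return rval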
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 92e9362be..f112b6dd0 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -125,16 +125,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -868,6 +868,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -887,11 +894,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -909,7 +920,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -926,13 +937,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -952,9 +963,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -969,10 +980,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -985,16 +996,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1037,7 +1048,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1054,10 +1065,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1067,39 +1074,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1362,7 +1361,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1376,18 +1374,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
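The `_run` hunk repeated above decodes subprocess output explicitly as UTF-8 instead of relying on the platform default. A minimal sketch, not part of the patch (the Popen pipe setup is assumed):

import subprocess

def run_oc(cmds, input_data=None):
    # The relevant change is the explicit utf-8 decode of stdout/stderr.
    proc = subprocess.Popen(cmds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(input_data)
    return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')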
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 1ffdce4df..d762e0c38 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -108,6 +108,12 @@ options:
required: false
default: None
aliases: []
+ type:
+ description:
+ - The secret type.
+ required: false
+ default: None
+ aliases: []
force:
description:
- Whether or not to force the operation
@@ -171,16 +177,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -914,6 +920,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -933,11 +946,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -955,7 +972,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -972,13 +989,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -998,9 +1015,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1015,10 +1032,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1031,16 +1048,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1083,7 +1100,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1100,10 +1117,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1113,39 +1126,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1408,7 +1413,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1422,18 +1426,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
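As a quick illustration of the new ascommalist parameter, here is a small standalone sketch that mirrors the loop above; the option names and values are invented for the example and are not part of the patch.

    # Standalone illustration of OpenShiftCLIConfig.stringify(ascommalist=...); values are made up.
    config_options = {
        'labels': {'include': True, 'value': {'app': 'registry', 'tier': 'infra'}},
        'replicas': {'include': True, 'value': 3},
    }

    def stringify(options, ascommalist=''):
        rval = []
        for key in sorted(options.keys()):
            data = options[key]
            if data['include'] and (data['value'] or isinstance(data['value'], int)):
                if key == ascommalist:
                    val = ','.join('{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items()))
                else:
                    val = data['value']
                rval.append('--{}={}'.format(key.replace('_', '-'), val))
        return rval

    print(stringify(config_options, ascommalist='labels'))
    # ['--labels=app=registry,tier=infra', '--replicas=3']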
@@ -1450,10 +1464,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -1464,6 +1480,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -1553,12 +1570,14 @@ class OCSecret(OpenShiftCLI):
def __init__(self,
namespace,
secret_name=None,
+ secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
+ self.type = secret_type
self.decode = decode
def get(self):
@@ -1582,13 +1601,17 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -1601,7 +1624,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -1613,7 +1636,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -1622,6 +1645,10 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
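For context, a hedged sketch of the oc invocation that the new type/force handling assembles; the secret name, type, and file mapping are invented example values.

    # Standalone illustration of the command built in OCSecret.create()/prep_secret() above.
    name, stype, force = 'registry-secret', 'kubernetes.io/dockercfg', True   # hypothetical inputs
    secrets = ['.dockercfg=/tmp/.dockercfg']                                  # name=path pairs
    cmd = ['secrets', 'new', name]
    if stype is not None:
        cmd.append('--type=%s' % stype)
    if force:
        cmd.append('--confirm')
    cmd.extend(secrets)
    print('oc ' + ' '.join(cmd))
    # oc secrets new registry-secret --type=kubernetes.io/dockercfg --confirm .dockercfg=/tmp/.dockercfg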
@@ -1634,6 +1661,7 @@ class OCSecret(OpenShiftCLI):
ocsecret = OCSecret(params['namespace'],
params['name'],
+ params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
@@ -1683,7 +1711,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -1700,7 +1728,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
@@ -1756,6 +1784,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 77056d5de..769b75e15 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -140,6 +140,13 @@ options:
- LoadBalancer
- ExternalName
aliases: []
+  external_ips:
+ description:
+ - A list of the external IPs that are exposed for this service.
+ - https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -177,16 +184,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -920,6 +927,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -939,11 +953,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -961,7 +979,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
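A brief sketch of what the reworked parameter formatting in _process() produces; the parameter names and values are examples chosen only to make the single-to-double quote swap visible.

    # Standalone illustration of the template parameter formatting above; params are made up.
    params = {'IMAGE_VERSION': 'v3.6', 'NODE_SELECTOR': "{'region': 'infra'}"}
    param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
    print(sorted(param_str))
    # ['IMAGE_VERSION=v3.6', 'NODE_SELECTOR={"region": "infra"}']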
@@ -978,13 +996,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1004,9 +1022,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1021,10 +1039,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1037,16 +1055,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1089,7 +1107,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1106,10 +1124,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1119,39 +1133,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
- else:
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1414,7 +1420,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1428,18 +1433,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1462,7 +1477,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1473,6 +1489,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -1485,8 +1502,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -1508,6 +1526,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -1516,6 +1538,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -1577,6 +1600,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
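A minimal sketch exercising the new external-IP helpers, assuming the Service and Yedit classes from the fragments above are importable; the service content and addresses are made-up examples.

    # Illustrative only: relies on the Service class defined in the fragment above.
    svc = Service(content={'kind': 'Service',
                           'metadata': {'name': 'router', 'namespace': 'default'},
                           'spec': {'ports': [{'port': 80}]}})
    svc.add_external_ips(['192.0.2.10', '192.0.2.11'])   # creates spec.externalIPs when missing
    print(svc.get_external_ips())                        # ['192.0.2.10', '192.0.2.11']
    print(svc.delete_external_ips('192.0.2.10'))         # True once an address is removed
    print(svc.get_external_ips())                        # ['192.0.2.11']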
# -*- -*- -*- Begin included fragment: class/oc_service.py -*- -*- -*-
@@ -1599,13 +1669,15 @@ class OCService(OpenShiftCLI):
ports,
session_affinity,
service_type,
+ external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
- cluster_ip, portal_ip, session_affinity, service_type)
+ cluster_ip, portal_ip, session_affinity, service_type,
+ external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@@ -1674,6 +1746,7 @@ class OCService(OpenShiftCLI):
params['ports'],
params['session_affinity'],
params['service_type'],
+ params['external_ips'],
params['kubeconfig'],
params['debug'])
@@ -1775,6 +1848,7 @@ def main():
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
+ external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 807bfc992..446987eff 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -123,16 +123,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -885,11 +892,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -907,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -924,13 +935,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -950,9 +961,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -967,10 +978,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -983,16 +994,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1035,7 +1046,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1052,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1065,39 +1072,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1360,7 +1359,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1374,18 +1372,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index c8f4ebef7..c7eb1986a 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -123,16 +123,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -885,11 +892,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -907,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -924,13 +935,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -950,9 +961,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -967,10 +978,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -983,16 +994,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1035,7 +1046,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1052,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1065,39 +1072,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1360,7 +1359,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1374,18 +1372,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
new file mode 100644
index 000000000..686119c65
--- /dev/null
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -0,0 +1,1685 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/storageclass -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_storageclass
+short_description: Create, modify, and idempotently manage openshift storageclasses.
+description:
+ - Manage openshift storageclass objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: False
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ provisioner:
+ description:
+    - The provisioner plugin that backs the storageclass (for example, aws-ebs).
+ required: false
+ default: 'aws-ebs'
+ aliases: []
+ default_storage_class:
+ description:
+ - Whether or not this is the default storage class
+ required: false
+ default: False
+ aliases: []
+ parameters:
+ description:
+    - A dictionary of parameters used to configure the storageclass; the valid keys depend on the chosen provisioner.
+ required: false
+ default: None
+ aliases: []
+ api_version:
+ description:
+ - The api version.
+ required: false
+ default: v1
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get storageclass
+ run_once: true
+ oc_storageclass:
+ name: gp2
+ state: list
+ register: registry_sc_out
+
+- name: create the storageclass
+ oc_storageclass:
+ run_once: true
+ name: gp2
+ parameters:
+ type: gp2
+ encrypted: 'true'
+ kmsKeyId: '<full kms key arn>'
+ provisioner: aws-ebs
+ default_storage_class: False
+ register: sc_out
+ notify:
+ - restart openshift master services
+'''
+
+# -*- -*- -*- End included fragment: doc/storageclass -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+        # Only update the root path (entire document) when it's a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+                               'file exists, that it has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+            # we encapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
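A short, standalone sketch of the dotted-key access the Yedit fragment above provides; the keys and values here are examples only and do not come from the patch.

    # Illustrative only: uses the Yedit class included above with in-memory content.
    yed = Yedit(content={'metadata': {'name': 'gp2'}})
    print(yed.get('metadata.name'))     # 'gp2'
    yed.put('parameters.type', 'gp2')   # intermediate keys are created as needed
    yed.delete('metadata.name')
    print(yed.yaml_dict)                # {'metadata': {}, 'parameters': {'type': 'gp2'}}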
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "cmd": ' '.join(cmds)}
+
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
+ rval.update({"stderr": stderr,
+ "stdout": stdout})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the resource file contents '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2
+ # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
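+ # Illustrative sketch (hypothetical output lines, not taken from the source): given
+ #   oc v3.6.0+abc123
+ #   kubernetes v1.6.1+5115d708d7
+ # filter_versions() would return
+ #   {'oc': 'v3.6.0+abc123', 'kubernetes': 'v1.6.1+5115d708d7', 'openshift': 'v3.6.0+abc123'}
+ # since the 3.2-era fallback copies the 'oc' value when no 'openshift' line is printed.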
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
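+ # Illustrative sketch (hypothetical input): add_custom_versions({'oc': 'v3.6.0+abc123'})
+ # would yield {'oc_numeric': '3.6.0', 'oc_short': '3.6'} -- the leading 'v' and any
+ # '-suffix' or '+build' metadata are stripped for the numeric form, and the short form
+ # is simply the first three characters after the 'v' ('3.6' here).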
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
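+ # Illustrative sketch (hypothetical definitions): since 'metadata' and 'status' are
+ # skipped, a call such as
+ #   Utils.check_def_equal({'spec': {'replicas': 1}},
+ #                         {'spec': {'replicas': 1}, 'metadata': {...}, 'status': {...}})
+ # returns True, while changing the nested 'replicas' value on either side returns False.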
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
+
+ return rval
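+ # Illustrative sketch (hypothetical options dict): with
+ #   options = {'labels': {'value': {'env': 'prod', 'tier': 'infra'}, 'include': True}}
+ # stringify(ascommalist='labels') yields ['--labels=env=prod,tier=infra'], whereas the
+ # default call would render the raw dict repr for that flag.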
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/storageclass.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class StorageClassConfig(object):
+ ''' Handle storageclass options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ provisioner,
+ parameters=None,
+ annotations=None,
+ default_storage_class="false",
+ api_version='v1',
+ kubeconfig='/etc/origin/master/admin.kubeconfig'):
+ ''' constructor for handling storageclass options '''
+ self.name = name
+ self.parameters = parameters
+ self.annotations = annotations
+ self.provisioner = provisioner
+ self.api_version = api_version
+ self.default_storage_class = str(default_storage_class).lower()
+ self.kubeconfig = kubeconfig
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' instantiates a storageclass dict '''
+ self.data['apiVersion'] = self.api_version
+ self.data['kind'] = 'StorageClass'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+
+ self.data['metadata']['annotations'] = {}
+ if self.annotations is not None:
+ self.data['metadata']['annotations'] = self.annotations
+
+ self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
+ self.default_storage_class
+
+ self.data['provisioner'] = self.provisioner
+
+ self.data['parameters'] = {}
+ if self.parameters is not None:
+ self.data['parameters'].update(self.parameters)
+
+ # default to aws if no params were passed
+ else:
+ self.data['parameters']['type'] = 'gp2'
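+ # Illustrative sketch (hypothetical values): StorageClassConfig('gp2-standard',
+ # 'kubernetes.io/aws-ebs', default_storage_class='true').data would resemble
+ #   {'apiVersion': 'v1', 'kind': 'StorageClass',
+ #    'metadata': {'name': 'gp2-standard',
+ #                 'annotations': {'storageclass.beta.kubernetes.io/is-default-class': 'true'}},
+ #    'provisioner': 'kubernetes.io/aws-ebs', 'parameters': {'type': 'gp2'}}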
+
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class StorageClass(Yedit):
+ ''' Class to model the oc storageclass object '''
+ annotations_path = "metadata.annotations"
+ provisioner_path = "provisioner"
+ parameters_path = "parameters"
+ kind = 'StorageClass'
+
+ def __init__(self, content):
+ '''StorageClass constructor'''
+ super(StorageClass, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' get the storageclass annotations '''
+ return self.get(StorageClass.annotations_path) or {}
+
+ def get_parameters(self):
+ ''' get the storageclass parameters '''
+ return self.get(StorageClass.parameters_path) or {}
+
+# -*- -*- -*- End included fragment: lib/storageclass.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_storageclass.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCStorageClass(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'storageclass'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCStorageClass '''
+ super(OCStorageClass, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
+ self.config = config
+ self.storage_class = None
+
+ def exists(self):
+ ''' return whether a storageclass exists'''
+ if self.storage_class:
+ return True
+
+ return False
+
+ def get(self):
+ '''return storageclass '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.storage_class = StorageClass(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # Parameters cannot currently be updated in place; delete and recreate instead.
+ self.delete()
+ # Pause briefly to give the delete time to complete.
+ # A better option would be to poll until the object is gone.
+ import time
+ time.sleep(5)
+ return self.create()
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ # check if params have updated
+ if self.storage_class.get_parameters() != self.config.parameters:
+ return True
+
+ for anno_key, anno_value in self.storage_class.get_annotations().items():
+ if 'is-default-class' in anno_key and anno_value != self.config.default_storage_class:
+ return True
+
+ return False
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ rconfig = StorageClassConfig(params['name'],
+ provisioner="kubernetes.io/{}".format(params['provisioner']),
+ parameters=params['parameters'],
+ annotations=params['annotations'],
+ api_version="storage.k8s.io/{}".format(params['api_version']),
+ default_storage_class=params.get('default_storage_class', 'false'),
+ kubeconfig=params['kubeconfig'],
+ )
+
+ oc_sc = OCStorageClass(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_sc.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_sc.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_sc.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ ########
+ # Update
+ ########
+ if oc_sc.needs_update():
+ api_rval = oc_sc.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the updated object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'results': api_rval, 'state': 'present'}
+
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
+
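+ # Illustrative sketch (hypothetical parameter values, not from the source): main() below
+ # builds the params dict from AnsibleModule and delegates to this method, e.g.
+ #   params = {'name': 'gp2-storage', 'provisioner': 'aws-ebs',
+ #             'parameters': {'type': 'gp2'}, 'annotations': None,
+ #             'api_version': 'v1', 'default_storage_class': 'true',
+ #             'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ #             'debug': False, 'state': 'present'}
+ #   rval = OCStorageClass.run_ansible(params, check_mode=False)
+ # A first run should report {'changed': True, ...}; rerunning with identical parameters
+ # should report {'changed': False, ...} once needs_update() finds nothing to change.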
+# -*- -*- -*- End included fragment: class/oc_storageclass.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_storageclass.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for storageclass
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ annotations=dict(default=None, type='dict'),
+ parameters=dict(default=None, type='dict'),
+ provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ api_version=dict(default='v1', type='str'),
+ default_storage_class=dict(default="false", type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCStorageClass.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_storageclass.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index aa9f07980..3a98693b7 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -183,16 +183,16 @@ ok: [ded-int-aws-master-61034] => {
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -926,6 +926,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -945,11 +952,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -967,7 +978,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -984,13 +995,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1010,9 +1021,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1027,10 +1038,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1043,16 +1054,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1095,7 +1106,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1112,10 +1123,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1125,39 +1132,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1420,7 +1419,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1434,18 +1432,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index eb293322d..939261526 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -95,16 +95,16 @@ oc_version:
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -838,6 +838,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -857,11 +864,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -879,7 +890,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -896,13 +907,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -922,9 +933,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -939,10 +950,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -955,16 +966,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1007,7 +1018,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1024,10 +1035,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1037,39 +1044,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1332,7 +1331,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1346,18 +1344,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 23b292763..41e7d0ab8 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -80,6 +80,18 @@ options:
required: false
default: False
aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ vol_name:
+ description:
+ - Name of the volume that is being queried.
+ required: false
+ default: None
+ aliases: []
namespace:
description:
- The name of the namespace where the object lives
@@ -160,16 +172,16 @@ EXAMPLES = '''
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -903,6 +915,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -922,11 +941,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -944,7 +967,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -961,13 +984,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -987,9 +1010,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1004,10 +1027,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1020,16 +1043,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1072,7 +1095,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1089,10 +1112,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1102,39 +1121,31 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1397,7 +1408,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1411,18 +1421,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
index 10f1c9b4b..fc394cb43 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
@@ -1,6 +1,10 @@
# pylint: skip-file
# flake8: noqa
+
+# pylint: disable=wrong-import-position
+from ansible.module_utils.six import string_types
+
def main():
'''
ansible oc adm module for ca create-server-cert
diff --git a/roles/lib_openshift/src/ansible/oc_adm_registry.py b/roles/lib_openshift/src/ansible/oc_adm_registry.py
index c85973c7d..d669a3488 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_registry.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_registry.py
@@ -17,7 +17,7 @@ def main():
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_adm_router.py b/roles/lib_openshift/src/ansible/oc_adm_router.py
index b6f8e90d0..c6563cc2f 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_router.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_router.py
@@ -21,7 +21,7 @@ def main():
key_file=dict(default=None, type='str'),
images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 20d75cb63..7b81760df 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -1,15 +1,20 @@
# pylint: skip-file
# flake8: noqa
-# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name,no-name-in-module, import-error
import json
+
+from distutils.version import StrictVersion
+
from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
- args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ # NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
+ args = ['atomic', 'install', '--system', '--system-package=no',
+ '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
@@ -93,7 +98,9 @@ def core(module):
module.fail_json(rc=rc, msg=err)
return
- containers = json.loads(out)
+ # NOTE: "or '[]' is a workaround until atomic containers list --json
+ # provides an empty list when no containers are present.
+ containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
@@ -123,9 +130,15 @@ def main():
)
# Verify that the platform supports atomic command
- rc, _, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
+ # This module requires atomic version 1.17.2 or later
+ atomic_version = StrictVersion(version_out.replace('\n', ''))
+ if atomic_version < StrictVersion('1.17.2'):
+ module.fail_json(
+ msg="atomic version 1.17.2+ is required",
+ err=str(atomic_version))
try:
core(module)
diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py
index 701740e4f..6ab53d044 100644
--- a/roles/lib_openshift/src/ansible/oc_obj.py
+++ b/roles/lib_openshift/src/ansible/oc_obj.py
@@ -23,7 +23,7 @@ def main():
force=dict(default=False, type='bool'),
selector=dict(default=None, type='str'),
),
- mutually_exclusive=[["content", "files"]],
+ mutually_exclusive=[["content", "files"], ["selector", "name"]],
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py
index 1337cbbe5..faa7c1772 100644
--- a/roles/lib_openshift/src/ansible/oc_secret.py
+++ b/roles/lib_openshift/src/ansible/oc_secret.py
@@ -15,6 +15,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
diff --git a/roles/lib_openshift/src/ansible/oc_service.py b/roles/lib_openshift/src/ansible/oc_service.py
index 9eb144e9c..b90c08255 100644
--- a/roles/lib_openshift/src/ansible/oc_service.py
+++ b/roles/lib_openshift/src/ansible/oc_service.py
@@ -21,6 +21,7 @@ def main():
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
+ external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py
new file mode 100644
index 000000000..e9f3ebbd3
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_storageclass.py
@@ -0,0 +1,32 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for storageclass
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ annotations=dict(default=None, type='dict'),
+ parameters=dict(default=None, type='dict'),
+ provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ api_version=dict(default='v1', type='str'),
+ default_storage_class=dict(default="false", type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCStorageClass.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
index fa0c4e3af..37a64e4ef 100644
--- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -77,7 +77,10 @@ class CAServerCert(OpenShiftCLI):
x509output, _ = proc.communicate()
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
- match = regex.search(x509output) # E501
+ match = regex.search(x509output.decode()) # E501
+ if not match:
+ return False
+
for entry in re.split(r", *", match.group(1)):
if entry.startswith('DNS') or entry.startswith('IP Address'):
cert_names.append(entry.split(':')[1])
@@ -93,6 +96,10 @@ class CAServerCert(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
+ # Filter non-strings from hostnames list s.t. the omit filter
+ # may be used to conditionally add a hostname.
+ params['hostnames'] = [host for host in params['hostnames'] if isinstance(host, string_types)]
+
config = CAServerCertConfig(params['kubeconfig'],
params['debug'],
{'cert': {'value': params['cert'], 'include': True},
@@ -124,7 +131,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/src/class/oc_adm_manage_node.py b/roles/lib_openshift/src/class/oc_adm_manage_node.py
index c07320477..6d9f24baa 100644
--- a/roles/lib_openshift/src/class/oc_adm_manage_node.py
+++ b/roles/lib_openshift/src/class/oc_adm_manage_node.py
@@ -44,7 +44,7 @@ class ManageNode(OpenShiftCLI):
if selector:
_sel = selector
- results = self._get('node', rname=_node, selector=_sel)
+ results = self._get('node', name=_node, selector=_sel)
if results['returncode'] != 0:
return results
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 88fcc1ddc..37a685ebb 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 25519c9c9..ad6869bb6 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -105,7 +105,7 @@ class Registry(OpenShiftCLI):
rval = 0
for part in self.registry_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
@@ -143,7 +143,7 @@ class Registry(OpenShiftCLI):
def prepare_registry(self):
''' prepare a registry for instantiation '''
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
@@ -331,25 +331,34 @@ class Registry(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
+ registry_options = {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ }
+
+ # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+ # by old versions of oc.
+ # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
+ # understand these parameters.
+ if params['daemonset']:
+ registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+ if params['enforce_quota']:
+ registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
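+ # Illustrative note (assumption about the resulting CLI): when daemonset is False the key
+ # is never added to registry_options, so to_option_list() emits no --daemonset flag; when
+ # it is True the generated 'oc adm registry' command gains '--daemonset=True'.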
+
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
- {'images': {'value': params['images'], 'include': True},
- 'latest_images': {'value': params['latest_images'], 'include': True},
- 'labels': {'value': params['labels'], 'include': True},
- 'ports': {'value': ','.join(params['ports']), 'include': True},
- 'replicas': {'value': params['replicas'], 'include': True},
- 'selector': {'value': params['selector'], 'include': True},
- 'service_account': {'value': params['service_account'], 'include': True},
- 'mount_host': {'value': params['mount_host'], 'include': True},
- 'env_vars': {'value': params['env_vars'], 'include': False},
- 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
- 'edits': {'value': params['edits'], 'include': False},
- 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
- 'daemonset': {'value': params['daemonset'], 'include': True},
- 'tls_key': {'value': params['tls_key'], 'include': True},
- 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
- })
+ registry_options)
ocregistry = Registry(rconfig, params['debug'])
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
index 356d06fdf..0d50116d1 100644
--- a/roles/lib_openshift/src/class/oc_adm_router.py
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -136,7 +136,7 @@ class Router(OpenShiftCLI):
self.secret = None
self.rolebinding = None
for part in self.router_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
@@ -222,7 +222,7 @@ class Router(OpenShiftCLI):
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index 1d3d977db..ae6795446 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/src/class/oc_configmap.py b/roles/lib_openshift/src/class/oc_configmap.py
index 87de3e1df..de77d1102 100644
--- a/roles/lib_openshift/src/class/oc_configmap.py
+++ b/roles/lib_openshift/src/class/oc_configmap.py
@@ -127,6 +127,10 @@ class OCConfigMap(OpenShiftCLI):
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
+ if not params['name']:
+ return {'failed': True,
+ 'msg': 'Please specify a name when state is absent|present.'}
+
########
# Delete
########
diff --git a/roles/lib_openshift/src/class/oc_label.py b/roles/lib_openshift/src/class/oc_label.py
index bd312c170..0a6895177 100644
--- a/roles/lib_openshift/src/class/oc_label.py
+++ b/roles/lib_openshift/src/class/oc_label.py
@@ -134,9 +134,9 @@ class OCLabel(OpenShiftCLI):
label_list = []
if self.name:
- result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
+ result = self._get(resource=self.kind, name=self.name, selector=self.selector)
- if 'labels' in result['results'][0]['metadata']:
+ if result['results'][0] and 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 51d3ce996..5e423bea9 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -10,7 +10,7 @@ class OCObject(OpenShiftCLI):
def __init__(self,
kind,
namespace,
- rname=None,
+ name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
@@ -19,21 +19,26 @@ class OCObject(OpenShiftCLI):
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.name = rname
+ self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
- results = self._get(self.kind, rname=self.name, selector=self.selector)
- if results['returncode'] != 0 and 'stderr' in results and \
- '\"%s\" not found' % self.name in results['stderr']:
+ results = self._get(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
- '''return all pods '''
- return self._delete(self.kind, self.name)
+ '''delete the object'''
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -109,24 +114,31 @@ class OCObject(OpenShiftCLI):
# Get
#####
if state == 'list':
- return {'changed': False, 'results': api_rval, 'state': 'list'}
-
- if not params['name']:
- return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+ return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
- if not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': 'absent'}
+ # verify the object is not already absent from our results
+ if (params['name'] is not None or params['selector'] is not None) and \
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
+ return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
- return {'changed': True, 'results': api_rval, 'state': 'absent'}
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ # create/update: Must define a name beyond this point
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
@@ -152,7 +164,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
@@ -167,7 +179,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+ return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
@@ -186,4 +198,4 @@ class OCObject(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
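A minimal standalone sketch of the state=absent decision above, assuming api_rval has the shape returned by oc get -o json (helper name illustrative): an empty 'items' list, or no results at all, is treated as "already absent" when a name or selector was given.

    # Sketch only: mirrors the "already absent" check before attempting a delete.
    def already_absent(params, api_rval):
        if params.get('name') is None and params.get('selector') is None:
            return False
        results = api_rval.get('results', [])
        if not results:
            return True
        first = results[0]
        return 'items' in first and len(first['items']) == 0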
diff --git a/roles/lib_openshift/src/class/oc_objectvalidator.py b/roles/lib_openshift/src/class/oc_objectvalidator.py
index 43f6cac67..c9fd3b532 100644
--- a/roles/lib_openshift/src/class/oc_objectvalidator.py
+++ b/roles/lib_openshift/src/class/oc_objectvalidator.py
@@ -35,8 +35,10 @@ class OCObjectValidator(OpenShiftCLI):
# check if it uses a reserved name
name = namespace['metadata']['name']
if not any((name == 'kube',
+ name == 'kubernetes',
name == 'openshift',
name.startswith('kube-'),
+ name.startswith('kubernetes-'),
name.startswith('openshift-'),)):
return False
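A minimal standalone sketch of the reserved-name predicate above (helper name illustrative):

    # Sketch only: a namespace name is reserved if it is, or is prefixed by,
    # one of the platform namespaces.
    def uses_reserved_name(name):
        return any((name == 'kube',
                    name == 'kubernetes',
                    name == 'openshift',
                    name.startswith('kube-'),
                    name.startswith('kubernetes-'),
                    name.startswith('openshift-')))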
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
index 9d29938aa..62a6bd571 100644
--- a/roles/lib_openshift/src/class/oc_process.py
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -30,7 +30,7 @@ class OCProcess(OpenShiftCLI):
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
self._template = results['results']['items']
return self._template
@@ -136,7 +136,7 @@ class OCProcess(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {"failed": True, "msg" : api_rval}
- return {"changed" : False, "results": api_rval, "state": "list"}
+ return {"changed" : False, "results": api_rval, "state": state}
elif state == 'present':
if check_mode and params['create']:
@@ -158,9 +158,9 @@ class OCProcess(OpenShiftCLI):
return {"failed": True, "msg": api_rval}
if params['create']:
- return {"changed": True, "results": api_rval, "state": "present"}
+ return {"changed": True, "results": api_rval, "state": state}
- return {"changed": False, "results": api_rval, "state": "present"}
+ return {"changed": False, "results": api_rval, "state": state}
# verify results
update = False
@@ -175,11 +175,11 @@ class OCProcess(OpenShiftCLI):
update = True
if not update:
- return {"changed": update, "results": api_rval, "state": "present"}
+ return {"changed": update, "results": api_rval, "state": state}
for cmd in rval:
if cmd['returncode'] != 0:
- return {"failed": True, "changed": update, "results": rval, "state": "present"}
+ return {"failed": True, "changed": update, "msg": rval, "state": state}
- return {"changed": update, "results": rval, "state": "present"}
+ return {"changed": update, "results": rval, "state": state}
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index deb36a9fa..4ee6443e9 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -13,12 +13,14 @@ class OCSecret(OpenShiftCLI):
def __init__(self,
namespace,
secret_name=None,
+ secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
+ self.type = secret_type
self.decode = decode
def get(self):
@@ -42,13 +44,17 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -61,7 +67,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -73,7 +79,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -82,6 +88,10 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -94,6 +104,7 @@ class OCSecret(OpenShiftCLI):
ocsecret = OCSecret(params['namespace'],
params['name'],
+ params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
@@ -143,7 +154,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -160,7 +171,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
diff --git a/roles/lib_openshift/src/class/oc_service.py b/roles/lib_openshift/src/class/oc_service.py
index 20cf23df5..7268a0c88 100644
--- a/roles/lib_openshift/src/class/oc_service.py
+++ b/roles/lib_openshift/src/class/oc_service.py
@@ -19,13 +19,15 @@ class OCService(OpenShiftCLI):
ports,
session_affinity,
service_type,
+ external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
- cluster_ip, portal_ip, session_affinity, service_type)
+ cluster_ip, portal_ip, session_affinity, service_type,
+ external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@@ -94,6 +96,7 @@ class OCService(OpenShiftCLI):
params['ports'],
params['session_affinity'],
params['service_type'],
+ params['external_ips'],
params['kubeconfig'],
params['debug'])
diff --git a/roles/lib_openshift/src/class/oc_storageclass.py b/roles/lib_openshift/src/class/oc_storageclass.py
new file mode 100644
index 000000000..aced586ae
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_storageclass.py
@@ -0,0 +1,155 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCStorageClass(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'storageclass'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCStorageClass '''
+ super(OCStorageClass, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
+ self.config = config
+ self.storage_class = None
+
+ def exists(self):
+ ''' return whether a storageclass exists'''
+ if self.storage_class:
+ return True
+
+ return False
+
+ def get(self):
+ '''return storageclass '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.storage_class = StorageClass(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # parameters cannot currently be updated in place; delete and recreate instead
+ self.delete()
+ # pause here to give the delete time to complete.
+ # A better option would be to poll until the object is gone.
+ import time
+ time.sleep(5)
+ return self.create()
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ # check if params have updated
+ if self.storage_class.get_parameters() != self.config.parameters:
+ return True
+
+ for anno_key, anno_value in self.storage_class.get_annotations().items():
+ if 'is-default-class' in anno_key and anno_value != self.config.default_storage_class:
+ return True
+
+ return False
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ rconfig = StorageClassConfig(params['name'],
+ provisioner="kubernetes.io/{}".format(params['provisioner']),
+ parameters=params['parameters'],
+ annotations=params['annotations'],
+ api_version="storage.k8s.io/{}".format(params['api_version']),
+ default_storage_class=params.get('default_storage_class', 'false'),
+ kubeconfig=params['kubeconfig'],
+ )
+
+ oc_sc = OCStorageClass(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_sc.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_sc.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_sc.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ ########
+ # Update
+ ########
+ if oc_sc.needs_update():
+ api_rval = oc_sc.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'results': api_rval, 'state': 'present'}
+
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
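The update() method above sleeps for a fixed five seconds and notes that polling would be better; a hypothetical polling helper, not part of this patch, could look like the following (get_fn stands in for any callable returning the usual result dict with a 'stderr' key).

    import time

    # Hypothetical helper: wait for the old storageclass to disappear instead
    # of sleeping a fixed interval.
    def wait_for_absent(get_fn, name, timeout=30, interval=2):
        '''poll get_fn until "<name>" not found appears in stderr or timeout expires'''
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = get_fn()
            if '"{}" not found'.format(name) in result.get('stderr', ''):
                return True
            time.sleep(interval)
        return False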
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
index e44843eb3..c6504ed01 100644
--- a/roles/lib_openshift/src/doc/obj
+++ b/roles/lib_openshift/src/doc/obj
@@ -39,15 +39,15 @@ options:
required: false
default: str
aliases: []
- all_namespace:
+ all_namespaces:
description:
- - The namespace where the object lives.
+ - Search in all namespaces for the object.
required: false
default: false
aliases: []
kind:
description:
- - The kind attribute of the object. e.g. dc, bc, svc, route
+ - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
required: True
default: None
aliases: []
diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret
index 5c2bd9bc0..76b147f6f 100644
--- a/roles/lib_openshift/src/doc/secret
+++ b/roles/lib_openshift/src/doc/secret
@@ -57,6 +57,12 @@ options:
required: false
default: None
aliases: []
+ type:
+ description:
+ - The secret type.
+ required: false
+ default: None
+ aliases: []
force:
description:
- Whether or not to force the operation
diff --git a/roles/lib_openshift/src/doc/service b/roles/lib_openshift/src/doc/service
index 418f91dc5..ba9aa0b38 100644
--- a/roles/lib_openshift/src/doc/service
+++ b/roles/lib_openshift/src/doc/service
@@ -89,6 +89,13 @@ options:
- LoadBalancer
- ExternalName
aliases: []
+ external_ips:
+ description:
+ - A list of the external IPs that are exposed for this service.
+ - https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_openshift/src/doc/storageclass b/roles/lib_openshift/src/doc/storageclass
new file mode 100644
index 000000000..5a7320d55
--- /dev/null
+++ b/roles/lib_openshift/src/doc/storageclass
@@ -0,0 +1,86 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_storageclass
+short_description: Create, modify, and idempotently manage openshift storageclasses.
+description:
+ - Manage openshift storageclass objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: False
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ provisioner:
+ description:
+ - The storage provisioner backing the storageclass, e.g. aws-ebs; the module prefixes it with kubernetes.io/.
+ required: false
+ default: 'aws-ebs'
+ aliases: []
+ default_storage_class:
+ description:
+ - Whether or not this is the default storage class
+ required: false
+ default: False
+ aliases: []
+ parameters:
+ description:
+ - A dictionary of parameters for the storageclass; the valid keys depend on the provisioner.
+ required: false
+ default: None
+ aliases: []
+ api_version:
+ description:
+ - The api version.
+ required: false
+ default: v1
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get storageclass
+ run_once: true
+ oc_storageclass:
+ name: gp2
+ state: list
+ register: registry_sc_out
+
+- name: create the storageclass
+ oc_storageclass:
+ run_once: true
+ name: gp2
+ parameters:
+ type: gp2
+ encrypted: 'true'
+ kmsKeyId: '<full kms key arn>'
+ provisioner: aws-ebs
+ default_storage_class: False
+ register: sc_out
+ notify:
+ - restart openshift master services
+'''
diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume
index 1d04afeef..43ff78c9f 100644
--- a/roles/lib_openshift/src/doc/volume
+++ b/roles/lib_openshift/src/doc/volume
@@ -29,6 +29,18 @@ options:
required: false
default: False
aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ vol_name:
+ description:
+ - Name of the volume that is being queried.
+ required: false
+ default: None
+ aliases: []
namespace:
description:
- The name of the namespace where the object lives
diff --git a/roles/lib_openshift/src/generate.py b/roles/lib_openshift/src/generate.py
index 3f23455b5..2570f51dd 100755
--- a/roles/lib_openshift/src/generate.py
+++ b/roles/lib_openshift/src/generate.py
@@ -5,12 +5,16 @@
import argparse
import os
+import re
import yaml
import six
OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+SKIP_COVERAGE_PATTERN = [re.compile('class Yedit.*$'),
+ re.compile('class Utils.*$')]
+PRAGMA_STRING = ' # pragma: no cover'
class GenerateAnsibleException(Exception):
@@ -72,6 +76,11 @@ def generate(parts):
if idx in [0, 1] and 'flake8: noqa' in line or 'pylint: skip-file' in line: # noqa: E501
continue
+ for skip in SKIP_COVERAGE_PATTERN:
+ if re.match(skip, line):
+ line = line.strip()
+ line += PRAGMA_STRING + os.linesep
+
data.write(line)
fragment_banner(fpart, "footer", data)
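A minimal standalone sketch of the coverage-pragma injection above (constant and helper names illustrative): lines that open the Yedit or Utils class definitions get ' # pragma: no cover' appended so the generated library does not drag those classes into coverage reports.

    import os
    import re

    # Sketch only: mirrors the SKIP_COVERAGE_PATTERN / PRAGMA_STRING handling.
    SKIP = [re.compile('class Yedit.*$'), re.compile('class Utils.*$')]

    def add_pragma(line):
        for pattern in SKIP:
            if re.match(pattern, line):
                return line.strip() + ' # pragma: no cover' + os.linesep
        return line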
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 132c586c9..16770b22d 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -76,6 +76,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
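A minimal standalone sketch of the race-condition workaround above, shown on a plain dict rather than the Yedit-backed file (helper name illustrative): dropping metadata.resourceVersion before the replace avoids failures caused by a stale version on the object being submitted.

    # Sketch only: strip the resourceVersion before handing the object to replace.
    def strip_resource_version(obj):
        metadata = obj.get('metadata', {})
        metadata.pop('resourceVersion', None)
        return obj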
@@ -95,11 +102,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
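A minimal standalone sketch of the new delete semantics above: a selector takes precedence over a name, and at least one of the two must be supplied (a plain ValueError stands in for OpenShiftCLIError here).

    # Sketch only: mirrors the command construction in _delete.
    def build_delete_cmd(resource, name=None, selector=None):
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise ValueError('Either name or selector is required when calling delete.')
        return cmd

    # build_delete_cmd('dc', selector='name=mysql')
    # -> ['delete', 'dc', '--selector=name=mysql']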
@@ -117,7 +128,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -134,13 +145,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -160,9 +171,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -177,10 +188,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -193,16 +204,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -245,7 +256,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -262,10 +273,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -275,34 +282,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -570,7 +569,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -584,18 +582,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
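A minimal standalone sketch of the ascommalist behaviour above: when the named key holds a dict, it is rendered as comma-delimited key=value pairs; every other included option is passed through unchanged.

    # Sketch only: mirrors the stringify logic of OpenShiftCLIConfig.
    def stringify(config_options, ascommalist=''):
        rval = []
        for key in sorted(config_options.keys()):
            data = config_options[key]
            if data['include'] and (data['value'] or isinstance(data['value'], int)):
                if key == ascommalist:
                    val = ','.join('{}={}'.format(k, v)
                                   for k, v in sorted(data['value'].items()))
                else:
                    val = data['value']
                rval.append('--{}={}'.format(key.replace('_', '-'), val))
        return rval

    # stringify({'labels': {'value': {'infra': 'registry', 'component': 'test'},
    #                       'include': True}}, ascommalist='labels')
    # -> ['--labels=component=test,infra=registry']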
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
index 4590dcf90..fe5ed9723 100644
--- a/roles/lib_openshift/src/lib/rule.py
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -136,9 +136,9 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py
index 75c32e8b1..a1c202442 100644
--- a/roles/lib_openshift/src/lib/secret.py
+++ b/roles/lib_openshift/src/lib/secret.py
@@ -9,10 +9,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -23,6 +25,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
diff --git a/roles/lib_openshift/src/lib/service.py b/roles/lib_openshift/src/lib/service.py
index eef568779..0e8cc3aa5 100644
--- a/roles/lib_openshift/src/lib/service.py
+++ b/roles/lib_openshift/src/lib/service.py
@@ -15,7 +15,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -26,6 +27,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -38,8 +40,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -61,6 +64,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -69,6 +76,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -129,3 +137,50 @@ class Service(Yedit):
def add_portal_ip(self, pip):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
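A minimal standalone sketch of the external-IP helpers above, operating on a plain list instead of the Yedit-backed service object (helper names illustrative):

    # Sketch only: mirrors the add/delete behaviour for spec.externalIPs.
    def add_external_ips(external_ips, inc):
        if not isinstance(inc, list):
            inc = [inc]
        external_ips.extend(inc)
        return external_ips

    def delete_external_ips(external_ips, inc):
        if not isinstance(inc, list):
            inc = [inc]
        removed = False
        for ip in inc:
            if ip in external_ips:
                external_ips.remove(ip)
                removed = True
        return removed

    # ips = ['1.2.3.4']; add_external_ips(ips, '5.6.7.8'); delete_external_ips(ips, ['1.2.3.4'])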
diff --git a/roles/lib_openshift/src/lib/storageclass.py b/roles/lib_openshift/src/lib/storageclass.py
new file mode 100644
index 000000000..c49a3066a
--- /dev/null
+++ b/roles/lib_openshift/src/lib/storageclass.py
@@ -0,0 +1,73 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class StorageClassConfig(object):
+ ''' Handle storageclass options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ provisioner,
+ parameters=None,
+ annotations=None,
+ default_storage_class="false",
+ api_version='v1',
+ kubeconfig='/etc/origin/master/admin.kubeconfig'):
+ ''' constructor for handling storageclass options '''
+ self.name = name
+ self.parameters = parameters
+ self.annotations = annotations
+ self.provisioner = provisioner
+ self.api_version = api_version
+ self.default_storage_class = str(default_storage_class).lower()
+ self.kubeconfig = kubeconfig
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' instantiates a storageclass dict '''
+ self.data['apiVersion'] = self.api_version
+ self.data['kind'] = 'StorageClass'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+
+ self.data['metadata']['annotations'] = {}
+ if self.annotations is not None:
+ self.data['metadata']['annotations'] = self.annotations
+
+ self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
+ self.default_storage_class
+
+ self.data['provisioner'] = self.provisioner
+
+ self.data['parameters'] = {}
+ if self.parameters is not None:
+ self.data['parameters'].update(self.parameters)
+
+ # default to aws if no params were passed
+ else:
+ self.data['parameters']['type'] = 'gp2'
+
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class StorageClass(Yedit):
+ ''' Class to model the oc storageclass object '''
+ annotations_path = "metadata.annotations"
+ provisioner_path = "provisioner"
+ parameters_path = "parameters"
+ kind = 'StorageClass'
+
+ def __init__(self, content):
+ '''StorageClass constructor'''
+ super(StorageClass, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' return the storageclass annotations '''
+ return self.get(StorageClass.annotations_path) or {}
+
+ def get_parameters(self):
+ ''' return the storageclass parameters '''
+ return self.get(StorageClass.parameters_path) or {}
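A minimal standalone sketch of the dict that StorageClassConfig.create_dict builds (helper name and values illustrative):

    # Sketch only: the resulting storageclass definition, with the aws gp2
    # fallback used when no parameters are supplied.
    def storageclass_dict(name, provisioner, parameters=None, default='false'):
        return {
            'apiVersion': 'v1',
            'kind': 'StorageClass',
            'metadata': {
                'name': name,
                'annotations': {
                    'storageclass.beta.kubernetes.io/is-default-class': default,
                },
            },
            'provisioner': provisioner,
            'parameters': parameters or {'type': 'gp2'},
        }

    # storageclass_dict('testsc', 'kubernetes.io/aws-ebs', {'type': 'gp2'}, 'true')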
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index 9fa2a6c0e..e9b6bf261 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -263,6 +263,17 @@ oc_service.py:
- class/oc_service.py
- ansible/oc_service.py
+oc_storageclass.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/storageclass
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/storageclass.py
+- class/oc_storageclass.py
+- ansible/oc_storageclass.py
+
oc_user.py:
- doc/generated
- doc/license
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
index 6990a11a8..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in testing
'''
diff --git a/roles/lib_openshift/src/test/integration/oc_label.yml b/roles/lib_openshift/src/test/integration/oc_label.yml
index b4e721407..22cf687c5 100755
--- a/roles/lib_openshift/src/test/integration/oc_label.yml
+++ b/roles/lib_openshift/src/test/integration/oc_label.yml
@@ -15,7 +15,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} not defined"
- when: "{{ item }} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/integration/oc_obj.yml b/roles/lib_openshift/src/test/integration/oc_obj.yml
new file mode 100755
index 000000000..c22a2f6a9
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_obj.yml
@@ -0,0 +1,207 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_obj.yml -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create test project
+ oc_project:
+ name: test
+ description: all things test
+ node_selector: ""
+
+ # Create Check #
+ - name: create a dc
+ oc_obj:
+ state: present
+ name: mysql
+ namespace: test
+ kind: dc
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ labels:
+ name: mysql
+ name: mysql
+ spec:
+ replicas: 1
+ selector: {}
+ strategy:
+ resources: {}
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: mysql
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_USER
+ value: mysql
+ - name: MYSQL_PASSWORD
+ value: mysql
+ - name: MYSQL_DATABASE
+ value: mysql
+ - name: MYSQL_ROOT_PASSWORD
+ value: mysql
+ image: openshift/mysql-55-centos7:latest
+ imagePullPolicy: Always
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: tcp-3306
+ protocol: TCP
+ resources: {}
+ securityContext:
+ capabilities: {}
+ privileged: false
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 31
+ triggers:
+ - type: ConfigChange
+ - imageChangeParams:
+ automatic: true
+ containerNames:
+ - mysql
+ from:
+ kind: ImageStreamTag
+ name: mysql:latest
+ type: ImageChange
+
+ - name: fetch created dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: list
+ namespace: test
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - dcout.results.results[0].metadata.name == 'mysql'
+ # End Create Check #
+
+
+ # Delete Check #
+ - name: delete created dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: absent
+ namespace: test
+ register: dcout
+
+ - name: fetch delete dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: list
+ namespace: test
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - "'\"mysql\" not found' in dcout.results.stderr"
+ # End Delete Check #
+
+ # Delete selector Check #
+ - name: create a dc
+ oc_obj:
+ state: present
+ name: mysql
+ namespace: test
+ kind: dc
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ labels:
+ name: mysql
+ name: mysql
+ spec:
+ replicas: 1
+ selector: {}
+ strategy:
+ resources: {}
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: mysql
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_USER
+ value: mysql
+ - name: MYSQL_PASSWORD
+ value: mysql
+ - name: MYSQL_DATABASE
+ value: mysql
+ - name: MYSQL_ROOT_PASSWORD
+ value: mysql
+ image: openshift/mysql-55-centos7:latest
+ imagePullPolicy: Always
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: tcp-3306
+ protocol: TCP
+ resources: {}
+ securityContext:
+ capabilities: {}
+ privileged: false
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 31
+ triggers:
+ - type: ConfigChange
+ - imageChangeParams:
+ automatic: true
+ containerNames:
+ - mysql
+ from:
+ kind: ImageStreamTag
+ name: mysql:latest
+ type: ImageChange
+
+ - name: delete using selector
+ oc_obj:
+ namespace: test
+ selector: name=mysql
+ kind: dc
+ state: absent
+ register: dcout
+
+ - debug: var=dcout
+
+ - name: get the dc
+ oc_obj:
+ namespace: test
+ selector: name=mysql
+ kind: dc
+ state: list
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - dcout.results.results[0]["items"]|length == 0
diff --git a/roles/lib_openshift/src/test/integration/oc_service.yml b/roles/lib_openshift/src/test/integration/oc_service.yml
index 3eb6facef..29535f24a 100755
--- a/roles/lib_openshift/src/test/integration/oc_service.yml
+++ b/roles/lib_openshift/src/test/integration/oc_service.yml
@@ -18,6 +18,9 @@
test-registtry: default
session_affinity: ClientIP
service_type: ClusterIP
+ labels:
+ component: test-registry
+ infra: registry
register: svc_out
- debug: var=svc_out
@@ -25,6 +28,8 @@
that:
- "svc_out.results.results[0]['metadata']['name'] == 'test-registry'"
- svc_out.changed
+ - "svc_out.results.results[0]['metadata']['labels']['component'] == 'test-registry'"
+ - "svc_out.results.results[0]['metadata']['labels']['infra'] == 'registry'"
msg: service create failed.
# Test idempotent create
diff --git a/roles/lib_openshift/src/test/integration/oc_storageclass.yml b/roles/lib_openshift/src/test/integration/oc_storageclass.yml
new file mode 100755
index 000000000..c82f9dedb
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_storageclass.yml
@@ -0,0 +1,87 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_storageclass.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create a storageclass
+ oc_storageclass:
+ name: testsc
+ parameters:
+ type: gp2
+ default_storage_class: "true"
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that:
+ - "sc_out.results.results[0]['metadata']['name'] == 'testsc'"
+ - sc_out.changed
+ - "sc_out.results.results[0]['parameters']['type'] == 'gp2'"
+ msg: storageclass create failed.
+
+ # Test idempotent create
+ - name: NOOP create the storageclass
+ oc_storageclass:
+ name: testsc
+ parameters:
+ type: gp2
+ default_storage_class: "true"
+ register: sc_out
+
+ - assert:
+ that:
+ - "sc_out.results.results[0]['metadata']['name'] == 'testsc'"
+ - sc_out.changed == False
+ msg: storageclass create failed. No changes expected
+
+ - name: test list storageclass
+ oc_storageclass:
+ name: testsc
+ state: list
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that: "sc_out.results[0]['metadata']['name'] == 'testsc'"
+ msg: storageclass list failed
+
+ - name: update the storageclass
+ oc_storageclass:
+ name: testsc
+ parameters:
+ type: gp2
+ encrypted: "true"
+ default_storage_class: "true"
+ register: sc_out
+
+ - assert:
+ that: "sc_out.results.results[0]['parameters']['encrypted'] == 'true'"
+ msg: storageclass update failed
+
+ - name: oc delete storageclass
+ oc_storageclass:
+ name: testsc
+ state: absent
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that:
+ - "sc_out.results['returncode'] == 0"
+ - "sc_out.results.results == {}"
+ msg: storageclass delete failed
+
+ - name: oc get storageclass
+ oc_storageclass:
+ name: testsc
+ state: list
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that:
+ - sc_out.changed == False
+ - "sc_out.results == [{}]"
+ msg: storageclass get failed
diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml
index ad1f9d188..9b4290052 100755
--- a/roles/lib_openshift/src/test/integration/oc_user.yml
+++ b/roles/lib_openshift/src/test/integration/oc_user.yml
@@ -14,7 +14,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} no defined"
- when: "{{ item}} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index bab36fddc..77787fe87 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase):
}
]}'''
+ @mock.patch('oc_adm_registry.locate_oc_binary')
@mock.patch('oc_adm_registry.Utils._write')
@mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_registry.Registry._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing state present '''
params = {'state': 'present',
'debug': False,
@@ -217,7 +218,7 @@ class RegistryTest(unittest.TestCase):
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'images': None,
'latest_images': None,
- 'labels': None,
+ 'labels': {"docker-registry": "default", "another-label": "val"},
'ports': ['5000'],
'replicas': 1,
'selector': 'type=infra',
@@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase):
(0, '', ''),
]
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- '/tmp/mocked_kubeconfig',
- ]
+ mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig'
+
+ mock_oc_binary.return_value = 'oc'
results = Registry.run_ansible(params, False)
@@ -254,7 +254,8 @@ class RegistryTest(unittest.TestCase):
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
- mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False',
+ mock.call(['oc', 'adm', 'registry',
+ "--labels=another-label=val,docker-registry=default",
'--ports=5000', '--replicas=1', '--selector=type=infra',
'--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 51393dbaf..dcf768e08 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase):
]
}'''
+ @mock.patch('oc_adm_router.locate_oc_binary')
@mock.patch('oc_adm_router.Utils._write')
@mock.patch('oc_adm_router.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_router.Router._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing a create '''
params = {'state': 'present',
'debug': False,
@@ -299,7 +300,7 @@ class RouterTest(unittest.TestCase):
'cert_file': None,
'key_file': None,
'cacert_file': None,
- 'labels': None,
+ 'labels': {"router": "router", "another-label": "val"},
'ports': ['80:80', '443:443'],
'images': None,
'latest_images': None,
@@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
results = Router.run_ansible(params, False)
self.assertTrue(results['changed'])
@@ -358,6 +363,7 @@ class RouterTest(unittest.TestCase):
mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'adm', 'router', 'router', '--expose-metrics=False', '--external-host-insecure=False',
+ "--labels=another-label=val,router=router",
'--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router',
'--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
index da326742f..b19a5a880 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase):
maxDiff = None
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when both all objects are empty '''
# Arrange
@@ -62,6 +63,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -76,9 +81,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when we fail to get objects '''
# Arrange
@@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
error_results = {
'returncode': 1,
'stderr': 'Error.',
@@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when both all objects are valid '''
# Arrange
@@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when all objects are invalid '''
# Arrange
@@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index e31393793..323b3423c 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -38,6 +38,7 @@ class OCSecretTest(unittest.TestCase):
'state': 'present',
'namespace': 'default',
'name': 'testsecretname',
+ 'type': 'Opaque',
'contents': [{
'path': "/tmp/somesecret.json",
'data': "{'one': 1, 'two': 2, 'three': 3}",
@@ -47,6 +48,7 @@ class OCSecretTest(unittest.TestCase):
'debug': False,
'files': None,
'delete_after': True,
+ 'force': False,
}
# Return values of our mocked function call. These get returned once per call.
@@ -74,7 +76,7 @@ class OCSecretTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None),
- mock.call(['oc', 'secrets', 'new', 'testsecretname', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'secrets', 'new', 'testsecretname', '--type=Opaque', mock.ANY, '-n', 'default'], None),
])
mock_write.assert_has_calls([
diff --git a/roles/lib_openshift/src/test/unit/test_oc_service.py b/roles/lib_openshift/src/test/unit/test_oc_service.py
index e74c66665..9c21a262f 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_service.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_service.py
@@ -39,6 +39,7 @@ class OCServiceTest(unittest.TestCase):
'selector': None,
'session_affinity': None,
'service_type': None,
+ 'external_ips': None,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
@@ -124,6 +125,7 @@ class OCServiceTest(unittest.TestCase):
'selector': {'router': 'router'},
'session_affinity': 'ClientIP',
'service_type': 'ClusterIP',
+ 'external_ips': None,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
@@ -303,3 +305,183 @@ class OCServiceTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @mock.patch('oc_service.Utils.create_tmpfile_copy')
+ @mock.patch('oc_service.OCService._run')
+ def test_create_with_labels(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a create service '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'ports': {'name': '9000-tcp',
+ 'port': 9000,
+ 'protocol': 'TCP',
+ 'targetPort': 9000},
+ 'state': 'present',
+ 'labels': {'component': 'some_component', 'infra': 'true'},
+ 'clusterip': None,
+ 'portalip': None,
+ 'selector': {'router': 'router'},
+ 'session_affinity': 'ClientIP',
+ 'service_type': 'ClusterIP',
+ 'external_ips': None,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ service = '''{
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/services/router",
+ "uid": "fabd2440-e3d8-11e6-951c-0e3dd518cefa",
+ "resourceVersion": "3206",
+ "creationTimestamp": "2017-01-26T15:06:14Z",
+ "labels": {"component": "some_component", "infra": "true"}
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "protocol": "TCP",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ },
+ {
+ "name": "5000-tcp",
+ "protocol": "TCP",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "router": "router"
+ },
+ "clusterIP": "172.30.129.161",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }'''
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: services "router" not found'),
+ (1, '', 'Error from server: services "router" not found'),
+ (0, service, ''),
+ (0, service, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCService.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['returncode'] == 0)
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels'], {"component": "some_component", "infra": "true"})
+
+ @mock.patch('oc_service.Utils.create_tmpfile_copy')
+ @mock.patch('oc_service.OCService._run')
+ def test_create_with_external_ips(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a create service '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'ports': {'name': '9000-tcp',
+ 'port': 9000,
+ 'protocol': 'TCP',
+ 'targetPort': 9000},
+ 'state': 'present',
+ 'labels': {'component': 'some_component', 'infra': 'true'},
+ 'clusterip': None,
+ 'portalip': None,
+ 'selector': {'router': 'router'},
+ 'session_affinity': 'ClientIP',
+ 'service_type': 'ClusterIP',
+ 'external_ips': ['1.2.3.4', '5.6.7.8'],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ service = '''{
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/services/router",
+ "uid": "fabd2440-e3d8-11e6-951c-0e3dd518cefa",
+ "resourceVersion": "3206",
+ "creationTimestamp": "2017-01-26T15:06:14Z",
+ "labels": {"component": "some_component", "infra": "true"}
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "protocol": "TCP",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ },
+ {
+ "name": "5000-tcp",
+ "protocol": "TCP",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "router": "router"
+ },
+ "clusterIP": "172.30.129.161",
+ "externalIPs": ["1.2.3.4", "5.6.7.8"],
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }'''
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: services "router" not found'),
+ (1, '', 'Error from server: services "router" not found'),
+ (0, service, ''),
+ (0, service, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCService.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['returncode'] == 0)
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels'], {"component": "some_component", "infra": "true"})
+ self.assertEqual(results['results']['results'][0]['spec']['externalIPs'], ["1.2.3.4", "5.6.7.8"])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_storageclass.py b/roles/lib_openshift/src/test/unit/test_oc_storageclass.py
new file mode 100755
index 000000000..4fd02a8b1
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_storageclass.py
@@ -0,0 +1,93 @@
+'''
+ Unit tests for oc storageclass
+'''
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_storageclass import OCStorageClass # noqa: E402
+
+
+class OCStorageClassTest(unittest.TestCase):
+ '''
+ Test class for OCStorageClass
+ '''
+ params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'testsc',
+ 'provisioner': 'kubernetes.io/aws-ebs',
+ 'annotations': {'storageclass.beta.kubernetes.io/is-default-class': "true"},
+ 'parameters': {'type': 'gp2'},
+ 'api_version': 'v1',
+ 'default_storage_class': 'true'}
+
+ @mock.patch('oc_storageclass.locate_oc_binary')
+ @mock.patch('oc_storageclass.Utils.create_tmpfile_copy')
+ @mock.patch('oc_storageclass.OCStorageClass._run')
+ def test_adding_a_storageclass(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
+ ''' Testing adding a storageclass '''
+
+ # Arrange
+
+ # run_ansible input parameters
+
+ valid_result_json = '''{
+ "kind": "StorageClass",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "testsc",
+ "selfLink": "/apis/storage.k8s.io/v1/storageclasses/gp2",
+ "uid": "4d8320c9-e66f-11e6-8edc-0eece8f2ce22",
+ "resourceVersion": "2828",
+ "creationTimestamp": "2017-01-29T22:07:19Z",
+ "annotations": {"storageclass.beta.kubernetes.io/is-default-class": "true"}
+ },
+ "provisioner": "kubernetes.io/aws-ebs",
+ "parameters": {"type": "gp2"}
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error from server: storageclass "testsc" not found'),
+
+ # Second call to mock
+ (0, 'storageclass "testsc" created', ''),
+
+ # Third call to mock
+ (0, valid_result_json, ''),
+ ]
+
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCStorageClass.run_ansible(OCStorageClassTest.params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'storageclass', 'testsc', '-o', 'json'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'storageclass', 'testsc', '-o', 'json'], None),
+ ])
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index ee98470b0..95a305b58 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -34,6 +34,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -421,15 +422,16 @@ class RepoqueryCLI(object):
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
- match_version, verbose):
+ match_version, ignore_excluders, verbose):
''' Constructor for Repoquery '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
+ self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
@@ -437,6 +439,8 @@ class Repoquery(RepoqueryCLI):
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+ self.tmp_file = None
+
def build_cmd(self):
''' build the repoquery cmd options '''
@@ -448,6 +452,9 @@ class Repoquery(RepoqueryCLI):
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
+ if self.ignore_excluders:
+ repo_cmd.append('--config=' + self.tmp_file.name)
+
repo_cmd.append(self.name)
return repo_cmd
@@ -458,7 +465,7 @@ class Repoquery(RepoqueryCLI):
version_dict = defaultdict(dict)
- for version in query_output.split('\n'):
+ for version in query_output.decode().split('\n'):
pkg_info = version.split("|")
pkg_version = {}
@@ -519,6 +526,20 @@ class Repoquery(RepoqueryCLI):
def repoquery(self):
'''perform a repoquery '''
+ if self.ignore_excluders:
+ # Duplicate yum.conf and reset the exclude= line to an empty string
+ # to clear the list of excluded packages
+ self.tmp_file = tempfile.NamedTemporaryFile()
+
+ with open("/etc/yum.conf", "r") as file_handler:
+ yum_conf_lines = file_handler.readlines()
+
+ yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
+
+ with open(self.tmp_file.name, "w") as file_handler:
+ file_handler.writelines(yum_conf_lines)
+ file_handler.flush()
+
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
@@ -541,6 +562,9 @@ class Repoquery(RepoqueryCLI):
else:
rval['package_found'] = False
+ if self.ignore_excluders:
+ self.tmp_file.close()
+
return rval
@staticmethod
@@ -552,6 +576,7 @@ class Repoquery(RepoqueryCLI):
params['query_type'],
params['show_duplicates'],
params['match_version'],
+ params['ignore_excluders'],
params['verbose'],
)
@@ -592,6 +617,7 @@ def main():
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
+ ignore_excluders=dict(default=False, required=False, type='bool'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index 9adaeeb52..baf72fe47 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -34,6 +34,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -212,7 +213,7 @@ class YeditException(Exception):
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index cb4efa6c1..40773b1c1 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -18,6 +18,7 @@ def main():
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
+ ignore_excluders=dict(default=False, required=False, type='bool'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
diff --git a/roles/lib_utils/src/class/repoquery.py b/roles/lib_utils/src/class/repoquery.py
index 82adcada5..e997780ad 100644
--- a/roles/lib_utils/src/class/repoquery.py
+++ b/roles/lib_utils/src/class/repoquery.py
@@ -5,15 +5,16 @@
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
- match_version, verbose):
+ match_version, ignore_excluders, verbose):
''' Constructor for Repoquery '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
+ self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
@@ -21,6 +22,8 @@ class Repoquery(RepoqueryCLI):
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+ self.tmp_file = None
+
def build_cmd(self):
''' build the repoquery cmd options '''
@@ -32,6 +35,9 @@ class Repoquery(RepoqueryCLI):
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
+ if self.ignore_excluders:
+ repo_cmd.append('--config=' + self.tmp_file.name)
+
repo_cmd.append(self.name)
return repo_cmd
@@ -42,7 +48,7 @@ class Repoquery(RepoqueryCLI):
version_dict = defaultdict(dict)
- for version in query_output.split('\n'):
+ for version in query_output.decode().split('\n'):
pkg_info = version.split("|")
pkg_version = {}
@@ -103,6 +109,20 @@ class Repoquery(RepoqueryCLI):
def repoquery(self):
'''perform a repoquery '''
+ if self.ignore_excluders:
+ # Duplicate yum.conf and reset the exclude= line to an empty string
+ # to clear the list of excluded packages
+ self.tmp_file = tempfile.NamedTemporaryFile()
+
+ with open("/etc/yum.conf", "r") as file_handler:
+ yum_conf_lines = file_handler.readlines()
+
+ yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
+
+ with open(self.tmp_file.name, "w") as file_handler:
+ file_handler.writelines(yum_conf_lines)
+ file_handler.flush()
+
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
@@ -125,6 +145,9 @@ class Repoquery(RepoqueryCLI):
else:
rval['package_found'] = False
+ if self.ignore_excluders:
+ self.tmp_file.close()
+
return rval
@staticmethod
@@ -136,6 +159,7 @@ class Repoquery(RepoqueryCLI):
params['query_type'],
params['show_duplicates'],
params['match_version'],
+ params['ignore_excluders'],
params['verbose'],
)
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index e0a27012f..957c35a06 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -11,7 +11,7 @@ class YeditException(Exception):
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index b0ab7c92c..567f8c9e0 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -9,6 +9,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/lib_utils/src/test/unit/test_repoquery.py b/roles/lib_utils/src/test/unit/test_repoquery.py
index e39d9d83f..325f41dab 100755
--- a/roles/lib_utils/src/test/unit/test_repoquery.py
+++ b/roles/lib_utils/src/test/unit/test_repoquery.py
@@ -37,6 +37,7 @@ class RepoQueryTest(unittest.TestCase):
'verbose': False,
'show_duplicates': False,
'match_version': None,
+ 'ignore_excluders': False,
}
valid_stderr = '''Repo rhel-7-server-extras-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/3268107132875399464-key.pem
@@ -44,7 +45,7 @@ class RepoQueryTest(unittest.TestCase):
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
- (0, '4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3', valid_stderr), # first call to the mock
+ (0, b'4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3', valid_stderr), # first call to the mock
]
# Act
diff --git a/roles/nuage_master/defaults/main.yaml b/roles/nuage_master/defaults/main.yaml
deleted file mode 100644
index c90f4f443..000000000
--- a/roles/nuage_master/defaults/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-nuage_master_cspadminpasswd: ""
-nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index fefd28bbd..4f8adb63e 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -33,6 +33,14 @@
- include: certificates.yml
+- name: Install Nuage VSD user certificate
+ become: yes
+ copy: src="{{ vsd_user_cert_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}"
+
+- name: Install Nuage VSD user key
+ become: yes
+ copy: src="{{ vsd_user_key_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_key_file | basename }}"
+
- name: Create nuage-openshift-monitor.yaml
become: yes
template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index de2a97e37..e077128a4 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -15,12 +15,10 @@ vspVersion: {{ vsp_version }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
-# CSP admin user's password
-cspAdminPassword: {{ nuage_master_cspadminpasswd }}
-# Enterprise admin user name
-enterpriseAdminUser: {{ nuage_master_adminusername }}
-# Enterprise admin password
-enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }}
+# VSD generated user certificate file location on master node
+userCertificateFile: {{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}
+# VSD generated user key file location on master node
+userKeyFile: {{ cert_output_dir }}/{{ vsd_user_key_file | basename }}
# Location where logs should be saved
log_dir: {{ nuage_mon_rest_server_logdir }}
# Monitor rest server parameters
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index d82dd36a4..928f9e2e6 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -20,6 +20,21 @@
become: yes
yum: name={{ plugin_rpm }} state=present
+- name: Assure CNI conf dir exists
+ become: yes
+ file: path="{{ cni_conf_dir }}" state=directory
+
+- name: Assure OpenShift CNI bin dir exists
+ become: yes
+ file: path="{{ cni_bin_dir }}" state=directory
+
+- name: Install CNI loopback plugin
+ become: yes
+ copy:
+ src: "{{ k8s_cni_loopback_plugin }}"
+ dest: "{{ cni_bin_dir }}/{{ k8s_cni_loopback_plugin | basename }}"
+ mode: 0755
+
- name: Copy the certificates and keys
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2
index d3c0a122a..9fab53906 100644
--- a/roles/nuage_node/templates/vsp-openshift.j2
+++ b/roles/nuage_node/templates/vsp-openshift.j2
@@ -8,6 +8,8 @@ CACert: {{ ca_cert }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
+# Name of the VSD user in admin group
+vsdUser: {{ vsduser }}
# IP address and port number of master API server
masterApiServer: {{ api_server }}
# REST server URL
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 7b789152f..4cf68411f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -19,4 +19,7 @@ nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_node
nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
+cni_conf_dir: "/etc/cni/net.d/"
+cni_bin_dir: "/opt/cni/bin/"
+
nuage_plugin_crt_dir: /usr/share/vsp-openshift
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 3b17d9ed6..419679bc2 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -95,7 +95,7 @@
{% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
@@ -108,6 +108,59 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
+- name: Test local loopback context
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+ --config={{ openshift_master_loopback_config }}
+ changed_when: false
+ register: loopback_config
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the loopback
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create temp directory for loopback master client config
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: openshift_ca_loopback_tmpdir
+ - name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ - name: Copy generated loopback master client config to master config dir
+ copy:
+ src: "{{ openshift_ca_loopback_tmpdir.stdout }}/{{ item }}"
+ dest: "{{ openshift_ca_config_dir }}"
+ remote_src: true
+ with_items:
+ - openshift-master.crt
+ - openshift-master.key
+ - openshift-master.kubeconfig
+ - name: Delete temp directory
+ file:
+ name: "{{ openshift_ca_loopback_tmpdir.stdout }}"
+ state: absent
+ when: loopback_context_string not in loopback_config.stdout
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
- name: Restore original serviceaccount keys
copy:
src: "{{ item }}.keep"
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
index a32e385ec..d04c1766d 100644
--- a/roles/openshift_ca/vars/main.yml
+++ b/roles/openshift_ca/vars/main.yml
@@ -4,3 +4,6 @@ openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
openshift_version: "{{ openshift_pkg_version | default('') }}"
+
+openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index df43c3770..f19a421cb 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -19,7 +19,6 @@ to be used with an inventory that is representative of the
cluster. For best results run `ansible-playbook` with the `-v` option.
-
# Role Variables
Core variables in this role:
@@ -51,11 +50,11 @@ How to use the Certificate Expiration Checking Role.
Run one of the example playbooks using an inventory file
representative of your existing cluster. Some example playbooks are
-included in this role, or you can read on below after this example to
-craft you own.
+included in this role, or you can [read on below for more examples](#more-example-playbooks)
+to help you craft your own.
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -66,14 +65,50 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
+## Run from a container
+
+The example playbooks that use this role are packaged in the
+[container image for openshift-ansible](../../README_CONTAINER_IMAGE.md), so you
+can run any of them by setting the `PLAYBOOK_FILE` environment variable when
+running an openshift-ansible container.
+
+There are several [examples](../../examples/README.md) in the `examples` directory that run certificate check playbooks from a container running on OpenShift.
+
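+A rough sketch of such an invocation (the image name, the volume mounts, and the
+`INVENTORY_FILE` variable below are illustrative, not authoritative; see the
+linked container image documentation for the exact instructions):
+
+```
+$ docker run -u `id -u` \
+  -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+  -v /path/to/inventory:/tmp/inventory:Z \
+  -e INVENTORY_FILE=/tmp/inventory \
+  -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml \
+  openshift/origin-ansible
+```
+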
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory.
+> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
+
+### Default behavior
+
+This playbook just invokes the certificate expiration check role with default options:
+
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+```
+
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
+### Easy mode
This example playbook is great if you're just wanting to **try the
role out**. This playbook enables HTML and JSON reports. All
@@ -95,18 +130,79 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+```
+
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
+
+### Easy mode and upload reports to masters
+
+This example builds on top of [easy-mode.yaml](#easy-mode) and additionally
+uploads a copy of the generated reports to the masters, with a timestamp in the
+file names.
+
+This is especially useful when the playbook runs from within a container, because
+the reports are generated inside the container and we need a way to access them
+afterwards. Uploading a copy of the reports to the masters is one easy way to do
+that. Alternatively, you can use the
+[role variables](#role-variables) that control the path of the generated reports
+to point to a container volume (see the [playbook with custom paths](#generate-html-and-json-reports-in-a-custom-path) for an example).
+
+With the container use case in mind, this playbook allows control over some
+options via environment variables (see the usage example after the run commands below):
+
+ - `CERT_EXPIRY_WARN_DAYS`: sets `openshift_certificate_expiry_warning_days`, overriding the role's default.
+ - `COPY_TO_PATH`: path in the masters where generated reports are uploaded.
+
+```yaml
+---
+- name: Generate certificate expiration reports
+ hosts: nodes:masters:etcd
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
+ roles:
+ - role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Create directory in masters
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports to the masters
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
```
-> [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml)
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+```
-***
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
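+
+If you run the playbook directly with `ansible-playbook`, the same options can be
+supplied as environment variables on the control host. A minimal sketch (the
+values shown are only illustrative):
+
+```
+$ CERT_EXPIRY_WARN_DAYS=30 COPY_TO_PATH=/etc/origin/certificate_expiration_report \
+  ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+```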
-Default behavior:
+### Generate HTML and JSON artifacts in their default paths
```yaml
---
@@ -114,25 +210,27 @@ Default behavior:
hosts: nodes:masters:etcd
become: yes
gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
roles:
- role: openshift_certificate_expiry
```
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
-
-***
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
+### Generate HTML and JSON reports in a custom path
-Generate HTML and JSON artifacts in their default paths:
+This example writes the reports to a custom path (`/var/lib/certcheck`) and adds a date timestamp to the generated file names, so a single location can hold multiple dated copies of the reports.
```yaml
---
@@ -143,22 +241,25 @@ Generate HTML and JSON artifacts in their default paths:
vars:
openshift_certificate_expiry_generate_html_report: yes
openshift_certificate_expiry_save_json_results: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
roles:
- role: openshift_certificate_expiry
```
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
-***
+### Long warning window
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -177,16 +278,16 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
-***
+### Long warning window and JSON report
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
@@ -206,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 5f102e960..a2bc9ecdb 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom filters for use in openshift-ansible
"""
@@ -35,7 +34,7 @@ Example playbook usage:
become: no
run_once: yes
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
copy:
content: "{{ hostvars|oo_cert_expiry_results_to_json() }}"
dest: "{{ openshift_certificate_expiry_json_results_path }}"
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index c204b5341..44a8fa29b 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -104,6 +104,7 @@ platforms missing the Python OpenSSL library.
self.extensions = []
PARSING_ALT_NAMES = False
+ PARSING_HEX_SERIAL = False
for line in self.cert_string.split('\n'):
l = line.strip()
if PARSING_ALT_NAMES:
@@ -114,10 +115,26 @@ platforms missing the Python OpenSSL library.
PARSING_ALT_NAMES = False
continue
+ if PARSING_HEX_SERIAL:
+ # Hex serials arrive colon-delimited
+ serial_raw = l.replace(':', '')
+ # Convert to decimal
+ self.serial = int('0x' + serial_raw, base=16)
+ PARSING_HEX_SERIAL = False
+ continue
+
# parse out the bits that we can
if l.startswith('Serial Number:'):
- # Serial Number: 11 (0xb)
- # => 11
+ # Decimal format:
+ # Serial Number: 11 (0xb)
+ # => 11
+ # Hex Format (large serials):
+ # Serial Number:
+ # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
+ # => 14449739080294792594019643629255165375
+ if l.endswith(':'):
+ PARSING_HEX_SERIAL = True
+ continue
self.serial = int(l.split()[-2])
elif l.startswith('Not After :'):
@@ -135,7 +152,7 @@ platforms missing the Python OpenSSL library.
continue
elif l.startswith('Subject:'):
- # O=system:nodes, CN=system:node:m01.example.com
+ # O = system:nodes, CN = system:node:m01.example.com
self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
def get_serial_number(self):
@@ -202,7 +219,7 @@ object"""
"""
self.subjects = []
for s in subject_string.split(', '):
- name, _, value = s.partition('=')
+ name, _, value = s.partition(' = ')
self.subjects.append((name, value))
def get_components(self):
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index 139d5de6e..b5234bd1e 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -13,12 +13,12 @@
src: cert-expiry-table.html.j2
dest: "{{ openshift_certificate_expiry_html_report_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_generate_html_report|bool }}"
+ when: openshift_certificate_expiry_generate_html_report|bool
- name: Generate the result JSON string
run_once: yes
set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
become: no
@@ -27,4 +27,4 @@
src: save_json_results.j2
dest: "{{ openshift_certificate_expiry_json_results_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py
index 4ca35ecbc..df948fff0 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/openshift_certificate_expiry/test/conftest.py
@@ -23,7 +23,10 @@ VALID_CERTIFICATE_PARAMS = [
{
'short_name': 'combined',
'cn': 'combined.example.com',
- 'serial': 6,
+ # Verify that HUGE serials parse correctly.
+ # Frobs PARSING_HEX_SERIAL in _parse_cert
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240
+ 'serial': 14449739080294792594019643629255165375,
'uses': b'clientAuth, serverAuth',
'dns': ['etcd'],
'ip': ['10.0.0.2', '192.168.0.2']
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
index ccdd48fa8..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -17,7 +17,8 @@ from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402
@pytest.fixture(scope='module')
def fake_valid_cert(valid_cert):
- cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text']
+ cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+ '-nameopt', 'oneline']
cert = subprocess.check_output(cmd)
return FakeOpenSSLCertificate(cert.decode('utf8'))
diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md
new file mode 100644
index 000000000..8283afed6
--- /dev/null
+++ b/roles/openshift_cfme/README.md
@@ -0,0 +1,404 @@
+# OpenShift-Ansible - CFME Role
+
+# PROOF OF CONCEPT - Alpha Version
+
+This role is based on the work in the upstream
+[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
+project. For additional literature on configuration specific to
+ManageIQ (optional post-installation tasks), visit the project's
+[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration).
+
+Please submit a
+[new issue](https://github.com/openshift/openshift-ansible/issues/new)
+if you run into bugs with this role or wish to request enhancements.
+
+# Important Notes
+
+This is an early *proof of concept* role to install the Cloud Forms
+Management Engine (ManageIQ) on OpenShift Container Platform (OCP).
+
+* This role is still in **ALPHA STATUS**
+* Many options are still hard-coded (e.g. the NFS setup)
+* Not many configurable options yet
+* **Should** be run on a dedicated cluster
+* **Will not run** on undersized infra
+* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable
+
+## Requirements
+
+**NOTE:** These requirements are copied from the upstream
+[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
+project.
+
+### Prerequisites:
+
+* [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html) or [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html) provisioned
+* NFS or other compatible volume provider
+* A cluster-admin user (created by role if required)
+
+### Cluster Sizing
+
+In order to avoid random deployment failures due to resource
+starvation, we recommend a minimum cluster size for a **test**
+environment.
+
+| Type | Size | CPUs | Memory |
+|----------------|---------|----------|----------|
+| Masters | `1+` | `8` | `12GB` |
+| Nodes | `2+` | `4` | `8GB` |
+| PV Storage | `25GB` | `N/A` | `N/A` |
+
+
+![Basic CFME Deployment](img/CFMEBasicDeployment.png)
+
+**CFME has hard requirements for memory. CFME will NOT install if your
+ infrastructure does not meet or exceed the requirements given
+ above. Do not run this playbook if you do not have the required
+ memory; you will only waste your time.**
+
+
+### Other sizing considerations
+
+* Recommendations assume MIQ will be the **only application running**
+ on this cluster.
+* Alternatively, you can provision an infrastructure node to run
+ registry/metrics/router/logging pods.
+* Each MIQ application pod will consume at least `3GB` of RAM on initial
+ deployment (blank deployment without providers).
+* RAM consumption will ramp up higher depending on appliance use; once
+ providers are added, expect higher resource consumption.
+
+
+### Assumptions
+
+1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements
+1) Your NFS server is on your master host
+1) Your PV backing NFS storage volume is mounted on `/exports/`
+
+Required directories that NFS will export to back the PVs:
+
+* `/exports/miq-pv0[123]`
+
+If the required directories are not present at install-time, they will
+be created using the recommended permissions per the
+[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data):
+
+* UID/GID: `root`/`root`
+* Mode: `0775`
+
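+If you prefer to create them ahead of time yourself, a minimal sketch using the
+ownership and mode listed above (assuming the NFS server is your master and the
+paths shown earlier):
+
+```
+root@master # mkdir -p /exports/miq-pv01 /exports/miq-pv02 /exports/miq-pv03
+root@master # chown root:root /exports/miq-pv0*
+root@master # chmod 0775 /exports/miq-pv0*
+```
+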
+**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS
+ storage, **ensure** it is mounted on `/exports/` **before** running
+ this role.
+
+
+
+## Role Variables
+
+Core variables in this role:
+
+| Name | Default value | Description |
+|-------------------------------|---------------|---------------|
+| `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding |
+
+
+Variables you may override have defaults defined in
+[defaults/main.yml](defaults/main.yml).
+
+
+# Important Notes
+
+This role is presently in **tech preview** status. Use it with the same
+caution you would give any other pre-release software.
+
+**Most importantly**, follow this one rule: don't re-run the entrypoint
+playbook multiple times in a row without cleaning up after previous
+runs if some of the CFME steps have run. This is a known
+flake. Cleanup instructions are provided at the bottom of this README.
+
+
+# Usage
+
+This section describes the basic usage of this role. All parameters
+will use their [default values](defaults/main.yml).
+
+## Pre-flight Checks
+
+**IMPORTANT:** As documented above in [the prerequisites](#prerequisites),
+ you **must already** have your OCP cluster up and running.
+
+**Optional:** The ManageIQ pod is fairly large (about 1.7 GB) so to
+save some spin-up time post-deployment, you can begin pre-pulling the
+docker image to each of your nodes now:
+
+```
+root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
+```
+
+## Getting Started
+
+1) The *entry point playbook* to install CFME is located in
+[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml)
+directory
+
+2) Update your existing `hosts` inventory file and ensure the
+parameter `openshift_cfme_install_app` is set to `True` under the
+`[OSEv3:vars]` block (see the inventory sketch after these steps).
+
+3) Using your existing `hosts` inventory file, run `ansible-playbook`
+with the entry point playbook:
+
+```
+$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml
+```
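+
+For reference, the relevant inventory fragment from step 2 might look like this
+(a sketch; keep your other existing cluster variables in place):
+
+```
+[OSEv3:vars]
+openshift_cfme_install_app=True
+```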
+
+## Next Steps
+
+Once complete, the playbook will let you know:
+
+
+```
+TASK [openshift_cfme : Status update] *********************************************************
+ok: [ho.st.na.me] => {
+ "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n"
+}
+```
+
+This will take several minutes (*possibly 10 or more*, depending on
+your network connection). However, you can get some insight into the
+deployment process during initialization.
+
+### oc describe pod manageiq-0
+
+*Some useful information about the output you will see if you run the
+`oc describe pod manageiq-0` command*
+
+**Readiness probes** - These will take a while to become
+`Healthy`. The initial health probes won't even happen for at least 8
+minutes depending on how long it takes you to pull down the large
+images. ManageIQ is a large application so it may take a considerable
+amount of time for it to deploy and be marked as `Healthy`.
+
+If you go to the node you know the application is running on (check
+for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe`
+output) you can run a `docker pull` command to monitor the progress of
+the image pull:
+
+```
+[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
+Trying to pull repository docker.io/manageiq/manageiq-pods ...
+sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods
+Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a
+Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine
+```
+
+The example above demonstrates the case where the image has been
+successfully pulled already.
+
+If the image isn't completely pulled already then you will see
+multiple progress bars detailing each image layer download status.
+
+
+### rsh
+
+*Useful inspection/progress monitoring techniques with the `oc rsh`
+command.*
+
+
+On your master node, switch to the `cfme` project (or whatever you
+named it if you overrode the `openshift_cfme_project` variable) and
+check on the pod states:
+
+```
+[root@cfme-master01 ~]# oc project cfme
+Now using project "cfme" on server "https://10.10.0.100:8443".
+
+[root@cfme-master01 ~]# oc get pod
+NAME READY STATUS RESTARTS AGE
+manageiq-0 0/1 Running 0 14m
+memcached-1-3lk7g 1/1 Running 0 14m
+postgresql-1-12slb 1/1 Running 0 14m
+```
+
+Note how the `manageiq-0` pod says `0/1` under the **READY**
+column. After some time (depending on your network connection) you'll
+be able to `rsh` into the pod to find out more of what's happening in
+real time. First, the easy-mode command, run this once `rsh` is
+available and then watch until it says `Started Initialize Appliance
+Database`:
+
+```
+[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service
+```
+
+For the full explanation of what this means, and more interactive
+inspection techniques, keep reading on.
+
+To obtain a shell on our `manageiq` pod we use this command:
+
+```
+[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l
+```
+
+The `rsh` command opens a shell in your pod for you. In this case it's
+the pod called `manageiq-0`. `systemd` is managing the services in
+this pod so we can use the `list-units` command to see what is running
+currently: `# systemctl list-units | grep appliance`.
+
+If you see the `appliance-initialize` service running, this indicates
+that basic setup is still in progress. We can monitor the process with
+the `journalctl` command like so:
+
+
+```
+[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status ==
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV ==
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment ==
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config ==
+Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance ==
+Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key
+Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database
+Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database...
+Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting
+Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete
+Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data ==
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup ==
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks ==
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, skipping
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping
+Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service.
+Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database.
+```
+
+Most of what we see here (above) is the initial database seeding
+process. This process isn't very quick, so be patient.
+
+At the bottom of the log there is a special line from the `systemctl`
+service, `Removed symlink
+/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The
+`appliance-initialize` service is no longer marked as enabled. This
+indicates that the base application initialization is complete now.
+
+We're not done yet, though; there are other ancillary services which
+run in this pod to support the application. *Still in the rsh shell*,
+use the `ps` command to watch for the `httpd` processes to
+start. You will see output similar to the following when that stage
+has completed:
+
+```
+[root@manageiq-0 vmdb]# ps aux | grep http
+root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+```
+
+Furthermore, you can find other related processes by just looking for
+ones with `MIQ` in their name:
+
+```
+[root@manageiq-0 vmdb]# ps aux | grep miq
+root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server
+root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic
+root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic
+root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic
+root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic
+root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5
+root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems
+root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting
+root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting
+root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker]
+root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker]
+root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker]
+root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq
+```
+
+Finally, *still in the rsh shell*, to test if the application is
+running correctly, we can request the application homepage. If the
+page is available, the page title will be `ManageIQ: Login`:
+
+```
+[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>'
+<title>
+ManageIQ: Login
+</title>
+```
+
+**Note:** The `-s` flag makes `curl` operations silent, and the `-k`
+flag tells it to ignore errors about untrusted certificates.
+
+
+
+# Additional Upstream Resources
+
+Below are some useful resources from the upstream project
+documentation. You may find these of value.
+
+* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
+* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes)
+* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
+
+
+# Manual Cleanup
+
+At this time uninstallation/cleanup is still a manual process. You
+will have to follow a few steps to fully remove CFME from your
+cluster.
+
+Delete the project:
+
+* `oc delete project cfme`
+
+Delete the PVs:
+
+* `oc delete pv miq-pv01`
+* `oc delete pv miq-pv02`
+* `oc delete pv miq-pv03`
+
+Clean out the old PV data:
+
+* `cd /exports/`
+* `find miq* -type f -delete`
+* `find miq* -type d -delete`
+
+Remove the NFS exports:
+
+* `rm /etc/exports.d/openshift_cfme.exports`
+* `exportfs -ar`
+
+Delete the user:
+
+* `oc delete user cfme`
+
+**NOTE:** The `oc delete project cfme` command will return quickly;
+however, the deletion will continue in the background. Continue
+running `oc get project` after you've completed the other steps to
+monitor the pods and the final project termination progress.
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
new file mode 100644
index 000000000..493e1ef68
--- /dev/null
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -0,0 +1,38 @@
+---
+# Namespace for the CFME project
+openshift_cfme_project: cfme
+# Namespace/project description
+openshift_cfme_project_description: ManageIQ - CloudForms Management Engine
+# Basic user assigned the `admin` role for the project
+openshift_cfme_user: cfme
+# Project system account for enabling privileged pods
+openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default"
+# All the required exports
+openshift_cfme_pv_exports:
+ - miq-pv01
+ - miq-pv02
+ - miq-pv03
+# PV template files and their created object names
+openshift_cfme_pv_data:
+ - pv_name: miq-pv01
+ pv_template: miq-pv-db.yaml
+ pv_label: CFME DB PV
+ - pv_name: miq-pv02
+ pv_template: miq-pv-region.yaml
+ pv_label: CFME Region PV
+ - pv_name: miq-pv03
+ pv_template: miq-pv-server.yaml
+ pv_label: CFME Server PV
+
+# Tuning parameter to use more than 5 images at once from an ImageStream
+openshift_cfme_maxImagesBulkImportedPerRepository: 100
+# Hostname/IP of the NFS server. Currently defaults to first master
+openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
+# TODO: Refactor '_install_app' variable. This is just for testing but
+# maybe in the future it should control the entire yes/no for CFME.
+#
+# Whether or not the manageiq app should be initialized ('oc new-app
+# --template=manageiq'). If False, everything UP TO 'new-app' is run.
+openshift_cfme_install_app: False
+# Docker image to pull
+openshift_cfme_container_image: "docker.io/manageiq/manageiq-pods:app-latest-fine"
diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml
new file mode 100644
index 000000000..8f0d2af38
--- /dev/null
+++ b/roles/openshift_cfme/files/miq-template.yaml
@@ -0,0 +1,566 @@
+---
+path: /tmp/miq-template-out
+data:
+ apiVersion: v1
+ kind: Template
+ labels:
+ template: manageiq
+ metadata:
+ name: manageiq
+ annotations:
+ description: "ManageIQ appliance with persistent storage"
+ tags: "instant-app,manageiq,miq"
+ iconClass: "icon-rails"
+ objects:
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ - apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances ManageIQ pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+ - apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+ - apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: miq-app
+ annotations:
+ description: "Keeps track of the ManageIQ image changes"
+ spec:
+ dockerImageRepository: "${APPLICATION_IMG_NAME}"
+ - apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: miq-postgresql
+ annotations:
+ description: "Keeps track of the PostgreSQL image changes"
+ spec:
+ dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
+ - apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: miq-memcached
+ annotations:
+ description: "Keeps track of the Memcached image changes"
+ spec:
+ dockerImageRepository: "${MEMCACHED_IMG_NAME}"
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-region"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+ - apiVersion: apps/v1beta1
+ kind: "StatefulSet"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the ManageIQ appliance"
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "${NAME}-server"
+ mountPath: "/persistent"
+ -
+ name: "${NAME}-region"
+ mountPath: "/persistent-region"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/manageiq/container-scripts/sync-pv-data
+ volumes:
+ -
+ name: "${NAME}-region"
+ persistentVolumeClaim:
+ claimName: ${NAME}-region
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ # Uncomment this if using dynamic volume provisioning.
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
+ # volume.alpha.kubernetes.io/storage-class: anything
+ spec:
+ accessModes: [ ReadWriteOnce ]
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+ - apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ - apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "miq-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+ - apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ - apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "miq-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "miq-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ containers:
+ -
+ name: "postgresql"
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "miq-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+
+ parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: manageiq
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "256MB"
+ -
+ name: "APPLICATION_CPU_REQ"
+ displayName: "Application Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
+ value: "1000m"
+ -
+ name: "POSTGRESQL_CPU_REQ"
+ displayName: "PostgreSQL Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
+ value: "500m"
+ -
+ name: "MEMCACHED_CPU_REQ"
+ displayName: "Memcached Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
+ value: "200m"
+ -
+ name: "APPLICATION_MEM_REQ"
+ displayName: "Application Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "6144Mi"
+ -
+ name: "POSTGRESQL_MEM_REQ"
+ displayName: "PostgreSQL Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the PostgreSQL container will need."
+ value: "1024Mi"
+ -
+ name: "MEMCACHED_MEM_REQ"
+ displayName: "Memcached Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the Memcached container will need."
+ value: "64Mi"
+ -
+ name: "APPLICATION_MEM_LIMIT"
+ displayName: "Application Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Application container can consume."
+ value: "16384Mi"
+ -
+ name: "POSTGRESQL_MEM_LIMIT"
+ displayName: "PostgreSQL Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can consume."
+ value: "8192Mi"
+ -
+ name: "MEMCACHED_MEM_LIMIT"
+ displayName: "Memcached Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can consume."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_NAME"
+ displayName: "PostgreSQL Image Name"
+ description: "This is the PostgreSQL image name requested to deploy."
+ value: "docker.io/manageiq/manageiq-pods"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "postgresql-latest-fine"
+ -
+ name: "MEMCACHED_IMG_NAME"
+ displayName: "Memcached Image Name"
+ description: "This is the Memcached image name requested to deploy."
+ value: "docker.io/manageiq/manageiq-pods"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "memcached-latest-fine"
+ -
+ name: "APPLICATION_IMG_NAME"
+ displayName: "Application Image Name"
+ description: "This is the Application image name requested to deploy."
+ value: "docker.io/manageiq/manageiq-pods"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "app-latest-fine"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_REPLICA_COUNT"
+ displayName: "Application Replica Count"
+ description: "This is the number of Application replicas requested to deploy."
+ value: "1"
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "15"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "5Gi"
+ -
+ name: "APPLICATION_REGION_VOLUME_CAPACITY"
+ displayName: "Application Region Volume Capacity"
+ required: true
+ description: "Volume space available for region application data."
+ value: "5Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "15Gi"
diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports
new file mode 100644
index 000000000..5457d41fc
--- /dev/null
+++ b/roles/openshift_cfme/files/openshift_cfme.exports
@@ -0,0 +1,3 @@
+/exports/miq-pv01 *(rw,no_root_squash,no_wdelay)
+/exports/miq-pv02 *(rw,no_root_squash,no_wdelay)
+/exports/miq-pv03 *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml
new file mode 100644
index 000000000..476a5e030
--- /dev/null
+++ b/roles/openshift_cfme/handlers/main.yml
@@ -0,0 +1,42 @@
+---
+######################################################################
+# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml
+#
+# TODO: Use the consolidated 'openshift_handlers' role once it's ready
+# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782
+######################################################################
+
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
+
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png
new file mode 100644
index 000000000..a89c1e325
--- /dev/null
+++ b/roles/openshift_cfme/img/CFMEBasicDeployment.png
Binary files differ
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml
new file mode 100644
index 000000000..9200f2c3c
--- /dev/null
+++ b/roles/openshift_cfme/meta/main.yml
@@ -0,0 +1,20 @@
+---
+galaxy_info:
+ author: Tim Bielawa
+  description: OpenShift CFME (ManageIQ) Deployer
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ version: 1.0
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_common
+- role: openshift_master_facts
diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml
new file mode 100644
index 000000000..7fa7d3997
--- /dev/null
+++ b/roles/openshift_cfme/tasks/create_pvs.yml
@@ -0,0 +1,36 @@
+---
+# Check for existence and then conditionally:
+# - evaluate templates
+# - create PVs
+#
+# These tasks idempotently create the required CFME PV objects. Do not
+# call this file directly. This file is intended to be run as an
+# include that has a 'with_items' attached to it, hence the use below
+# of variables like "{{ item.pv_label }}".
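+#
+# For example (this mirrors how tasks/main.yml includes this file):
+#
+#   - include: create_pvs.yml
+#     with_items: "{{ openshift_cfme_pv_data }}"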
+
+- name: "Check if the {{ item.pv_label }} template has been created already"
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ state: list
+ kind: pv
+ name: "{{ item.pv_name }}"
+ register: miq_pv_check
+
+# Skip all of this if the PV already exists
+- block:
+ - name: "Ensure the {{ item.pv_label }} template is evaluated"
+ template:
+ src: "{{ item.pv_template }}.j2"
+ dest: "{{ template_dir }}/{{ item.pv_template }}"
+
+ - name: "Ensure {{ item.pv_label }} is created"
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ kind: pv
+ name: "{{ item.pv_name }}"
+ state: present
+ delete_after: True
+ files:
+ - "{{ template_dir }}/{{ item.pv_template }}"
+ when:
+ - not miq_pv_check.results.results.0
diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml
new file mode 100644
index 000000000..acbce7232
--- /dev/null
+++ b/roles/openshift_cfme/tasks/main.yml
@@ -0,0 +1,148 @@
+---
+######################################################################
+# Users, projects, and privileges
+
+- name: Ensure the CFME user exists
+ oc_user:
+ state: present
+ username: "{{ openshift_cfme_user }}"
+
+- name: Ensure the CFME namespace exists with CFME user as admin
+ oc_project:
+ state: present
+ name: "{{ openshift_cfme_project }}"
+ display_name: "{{ openshift_cfme_project_description }}"
+ admin: "{{ openshift_cfme_user }}"
+
+- name: Ensure the CFME namespace service account is privileged
+ oc_adm_policy_user:
+ namespace: "{{ openshift_cfme_project }}"
+ user: "{{ openshift_cfme_service_account }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+######################################################################
+# NFS
+
+- name: Ensure the /exports/ directory exists
+ file:
+ path: /exports/
+ state: directory
+ mode: 0755
+ owner: root
+ group: root
+
+- name: Ensure the miq-pv0X export directories exist
+ file:
+ path: "/exports/{{ item }}"
+ state: directory
+ mode: 0775
+ owner: root
+ group: root
+ with_items: "{{ openshift_cfme_pv_exports }}"
+
+- name: Ensure the NFS exports for CFME PVs exist
+ copy:
+ src: openshift_cfme.exports
+ dest: /etc/exports.d/openshift_cfme.exports
+ register: nfs_exports_updated
+
+- name: Ensure the NFS export table is refreshed if exports were added
+ command: exportfs -ar
+ when:
+ - nfs_exports_updated.changed
+
+
+######################################################################
+# Create the required CFME PVs. Check out these online docs if you
+# need a refresher on includes looping with items:
+# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
+# * http://stackoverflow.com/a/35128533
+#
+# TODO: Handle the case where a PV template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- include: create_pvs.yml
+ with_items: "{{ openshift_cfme_pv_data }}"
+
+######################################################################
+# CFME App Template
+#
+# Note, this is different from the create_pvs.yml tasks in that the
+# application template does not require any jinja2 evaluation.
+#
+# TODO: Handle the case where the server template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- name: Check if the CFME Server template has been created already
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ state: list
+ kind: template
+ name: manageiq
+ register: miq_server_check
+
+- name: Copy over CFME Server template
+ copy:
+ src: miq-template.yaml
+ dest: "{{ template_dir }}/miq-template.yaml"
+
+- name: Ensure the server template was read from disk
+  debug:
+    var: r_openshift_cfme_miq_template_content
+
+- name: Ensure CFME Server Template exists
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ kind: template
+ name: "manageiq"
+ state: present
+ content: "{{ r_openshift_cfme_miq_template_content }}"
+
+######################################################################
+# Let's do this
+
+- name: Ensure the CFME Server is created
+ oc_process:
+ namespace: "{{ openshift_cfme_project }}"
+ template_name: manageiq
+ create: True
+ register: cfme_new_app_process
+ run_once: True
+ when:
+ # User said to install CFME in their inventory
+ - openshift_cfme_install_app | bool
+ # # The server app doesn't exist already
+ # - not miq_server_check.results.results.0
+
+- debug:
+ var: cfme_new_app_process
+
+######################################################################
+# Various cleanup steps
+
+# TODO: Not sure what to do about this right now. Might be able to
+# just delete it? This currently warns about "Unable to find
+# '<TEMP_DIR>' in expected paths."
+- name: Ensure the temporary PV/App templates are erased
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_fileglob:
+ - "{{ template_dir }}/*.yaml"
+
+- name: Ensure the temporary PV/app template directory is erased
+ file:
+ path: "{{ template_dir }}"
+ state: absent
+
+######################################################################
+
+- name: Status update
+ debug:
+ msg: >
+ CFME has been deployed. Note that there will be a delay before
+ it is fully initialized.
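+
+# A quick way to watch the deployment come up once this role finishes
+# (a sketch only; assumes the default project name from defaults/main.yml):
+#
+#   oc get pods -n cfme -w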
diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml
new file mode 100644
index 000000000..02b0f10bf
--- /dev/null
+++ b/roles/openshift_cfme/tasks/tune_masters.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure bulk image import limit is tuned
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository'
+ value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}"
+ state: present
+ backup: True
+ notify:
+ - restart master
+
+- meta: flush_handlers
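+
+# After this task runs, master-config.yaml is expected to contain a
+# fragment along these lines (100 is the role default for
+# openshift_cfme_maxImagesBulkImportedPerRepository; shown only as an
+# illustration of the nested key that yedit writes):
+#
+#   imagePolicyConfig:
+#     maxImagesBulkImportedPerRepository: 100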
diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml
new file mode 100644
index 000000000..cba734a0e
--- /dev/null
+++ b/roles/openshift_cfme/tasks/uninstall.yml
@@ -0,0 +1,43 @@
+---
+- include_role:
+ name: lib_openshift
+
+- name: Uninstall CFME - ManageIQ
+ debug:
+ msg: Uninstalling Cloudforms Management Engine - ManageIQ
+
+- name: Ensure the CFME project is removed
+ oc_project:
+ state: absent
+ name: "{{ openshift_cfme_project }}"
+
+- name: Ensure the CFME template is removed
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ state: absent
+ kind: template
+ name: manageiq
+
+- name: Ensure the CFME PVs are removed
+ oc_obj:
+ state: absent
+ all_namespaces: True
+ kind: pv
+ name: "{{ item }}"
+ with_items: "{{ openshift_cfme_pv_exports }}"
+
+- name: Ensure the CFME user is removed
+ oc_user:
+ state: absent
+ username: "{{ openshift_cfme_user }}"
+
+- name: Ensure the CFME NFS Exports are removed
+ file:
+ path: /etc/exports.d/openshift_cfme.exports
+ state: absent
+ register: nfs_exports_removed
+
+- name: Ensure the NFS export table is refreshed if exports were removed
+ command: exportfs -ar
+ when:
+ - nfs_exports_removed.changed
diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
new file mode 100644
index 000000000..b8c3bb277
--- /dev/null
+++ b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv01
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/miq-pv01
+ server: {{ openshift_cfme_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
new file mode 100644
index 000000000..7218773f0
--- /dev/null
+++ b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv02
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/miq-pv02
+ server: {{ openshift_cfme_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
new file mode 100644
index 000000000..7b40b6c69
--- /dev/null
+++ b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv03
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/miq-pv03
+ server: {{ openshift_cfme_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 4ed3e1f01..57ac16602 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -1,8 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=missing-docstring,invalid-name
-#
import random
import tempfile
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index f22dd4520..5788e6d74 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -7,4 +7,4 @@
template:
dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
src: openstack.conf.j2
- when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
+ when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index d9ccf87bc..51313a258 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -28,10 +28,18 @@
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
- fail:
- msg: Calico cannot currently be used with Flannel in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both
+ msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
- fail:
+ msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
+ msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+- fail:
msg: openshift_hostname must be 64 characters or less
when: openshift_hostname is defined and openshift_hostname | length > 64
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
new file mode 100644
index 000000000..198163127
--- /dev/null
+++ b/roles/openshift_default_storage_class/README.md
@@ -0,0 +1,39 @@
+openshift_default_storage_class
+===============================
+
+A role that deploys configurations for the OpenShift StorageClass
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+* `openshift_storageclass_name`: Name of the storage class to create
+* `openshift_storageclass_provisioner`: The Kubernetes provisioner to use
+* `openshift_storageclass_type`: Type of storage to use; this differs among clouds/providers
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+- role: openshift_default_storage_class
+ openshift_storageclass_name: awsEBS
+ openshift_storageclass_provisioner: kubernetes.io/aws-ebs
+ openshift_storageclass_type: gp2
+
+
+License
+-------
+
+Apache
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
new file mode 100644
index 000000000..bdece7640
--- /dev/null
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -0,0 +1,19 @@
+---
+openshift_storageclass_defaults:
+ aws:
+ provisioner: aws-ebs
+ name: gp2
+ parameters:
+ type: gp2
+ kmsKeyId: ''
+ encrypted: 'false'
+ gce:
+ name: standard
+ provisioner: gce-pd
+ parameters:
+ type: pd-standard
+
+openshift_storageclass_default: "true"
+openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
+openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
+openshift_storageclass_parameters: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['parameters'] }}"
diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml
new file mode 100644
index 000000000..d7d57fe39
--- /dev/null
+++ b/roles/openshift_default_storage_class/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Openshift Operations
+ description: This role configures the StorageClass in Openshift
+ company: Red Hat
+ license: Apache
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+    versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
new file mode 100644
index 000000000..172e2ac25
--- /dev/null
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+# Install default storage classes in GCE & AWS
+- name: Ensure storageclass object
+ oc_storageclass:
+ name: "{{ openshift_storageclass_name }}"
+ default_storage_class: "{{ openshift_storageclass_default | default('true') | string}}"
+ parameters: "{{ openshift_storageclass_parameters }}"
+ provisioner: "{{ openshift_storageclass_provisioner }}"
+ run_once: true
diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_default_storage_class/vars/main.yml
index ed97d539c..ed97d539c 100644
--- a/roles/openshift_etcd_ca/tasks/main.yml
+++ b/roles/openshift_default_storage_class/vars/main.yml
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 049ceffe0..95e94171d 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -16,6 +16,10 @@
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
+ use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
+ - role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- set_fact:
docker_additional_registries: "{{ openshift.docker.additional_registries
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 82db36eba..b3ecd57a6 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -5,6 +5,7 @@ etcd_hostname: "{{ openshift.common.hostname }}"
etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
-etcd_cert_config_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container | bool else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_cert_config_dir: "/etc/etcd"
+etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 0f2bec6d3..e8d687877 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,10 +5,9 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.6
-ORIGIN_VERSION=${1:-v1.6}
-RHAMP_TAG=1.0.0.GA
-RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
+XPAAS_VERSION=ose-v1.4.1
+ORIGIN_VERSION=${1:-v3.6}
+RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
TEMP=`mktemp -d`
@@ -17,9 +16,11 @@ pushd $TEMP
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/jboss-fuse/application-templates/archive/GA.zip -O fis-GA.zip
wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
+wget https://github.com/3scale/rhamp-openshift-templates/archive/${RHAMP_TAG}.zip -O amp.zip
unzip origin-master.zip
unzip application-templates-master.zip
unzip fis-GA.zip
+unzip amp.zip
mv origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
mv origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
mv origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
@@ -30,15 +31,11 @@ mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BA
mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+find 3scale-amp-openshift-templates-${RHAMP_TAG}/ -name '*.yml' -exec mv {} ${EXAMPLES_BASE}/quickstart-templates/ \;
popd
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json
-wget ${RHAMP_TEMPLATE} -O ${EXAMPLES_BASE}/quickstart-templates/apicast-gateway-template.yml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/logging-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/logging-deployer.yaml
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
index 536385712..08751d131 120000
--- a/roles/openshift_examples/files/examples/latest
+++ b/roles/openshift_examples/files/examples/latest
@@ -1 +1 @@
-v1.6 \ No newline at end of file
+v3.6 \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
index 4f25a9c8f..982bd9530 100644
--- a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
@@ -48,7 +48,7 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms app image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+ dockerImageRepository: registry.access.redhat.com/cloudforms42/cfme-openshift-app
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -188,7 +188,7 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms memcached image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+ dockerImageRepository: registry.access.redhat.com/cloudforms42/cfme-openshift-memcached
- apiVersion: v1
kind: "DeploymentConfig"
metadata:
@@ -272,7 +272,7 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms postgresql image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+ dockerImageRepository: registry.access.redhat.com/cloudforms42/cfme-openshift-postgresql
- apiVersion: v1
kind: "DeploymentConfig"
metadata:
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+  description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+  description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..fef86ff5a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: What docker image should be used for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: What docker image should be used for cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.4.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+  description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+  description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: Public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: Under this hostname the Hawkular Services will be accessible, if left blank a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services, if left blank a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password that is used for accessing the Hawkular Services, if left blank a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the cassandra
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
index f347f1f9f..536f7275e 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
index 6ed744777..3b7fdccce 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
index 97a8abf6d..ee274194f 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
index 0656219fb..e5ba43669 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
index d60b4647d..969e62ac5 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -36,7 +41,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
index c2bfa40fd..4f39d41a5 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
index 7a16e742a..c37102cb0 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
index 242212d6f..32dc93a95 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
index 82a09a3ec..6bb683e52 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
@@ -21,11 +21,27 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
+ },
+ "stringData" : {
+ "database-password" : "${REDIS_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
@@ -117,7 +133,12 @@
"env": [
{
"name": "REDIS_PASSWORD",
- "value": "${REDIS_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
index 1d5f59188..9e8be2309 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
@@ -21,11 +21,27 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
+ },
+ "stringData" : {
+ "database-password" : "${REDIS_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
@@ -134,7 +150,12 @@
"env": [
{
"name": "REDIS_PASSWORD",
- "value": "${REDIS_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
index 0d5ac21d8..857ffa980 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
@@ -27,8 +27,9 @@
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore",
"supports":"dotnet",
- "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.1/test/asp-net-hello-world"
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.1"
},
"from": {
"kind": "ImageStreamTag",
@@ -43,8 +44,9 @@
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore11",
"supports":"dotnet:1.1,dotnet",
- "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.1/test/asp-net-hello-world",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.1",
"version": "1.1"
},
"from": {
@@ -60,8 +62,9 @@
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
"supports":"dotnet:1.0,dotnet",
- "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.0/test/asp-net-hello-world",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.0",
"version": "1.0"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
index 1a90a9409..6cef21945 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/httpd-24-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
@@ -103,7 +148,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +182,22 @@
"kind": "DockerImage",
"name": "centos/nodejs-4-centos7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-6-centos7:latest"
+ }
}
]
}
@@ -407,7 +468,7 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "ImageStreamTag",
@@ -423,7 +484,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
"version": "8.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -439,7 +500,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
"version": "9.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -455,7 +516,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
"version": "10.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -471,7 +532,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
"version": "10.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -800,7 +861,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
index eb94c3bb4..abdae01e3 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/httpd-24-rhel7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
@@ -103,7 +148,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +182,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest"
+ }
}
]
}
@@ -253,7 +314,7 @@
"tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
- "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"kind": "DockerImage",
@@ -707,7 +768,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md
index f48d8d4a8..6d2ccbf7f 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md
@@ -17,6 +17,7 @@ instantiating them.
* [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex).
* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
* [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. For more information see the [source repository](https://github.com/openshift/httpd-ex).
* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
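
The quickstart README now lists the Httpd example alongside the others; each entry is a raw template URL that can be instantiated directly, with parameters overridden via -p as needed. For example, using the URL added above:

    oc new-app -f https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json
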
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/amp.yml b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/amp.yml
new file mode 100644
index 000000000..4e469f6e8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/amp.yml
@@ -0,0 +1,1261 @@
+base_env: &base_env
+- name: RAILS_ENV
+ value: "production"
+- name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+- name: FORCE_SSL
+ value: "true"
+- name: THREESCALE_SUPERDOMAIN
+ value: "${WILDCARD_DOMAIN}"
+- name: TENANT_NAME
+ value: "${TENANT_NAME}"
+- name: APICAST_ACCESS_TOKEN
+ value: "${APICAST_ACCESS_TOKEN}"
+- name: ADMIN_ACCESS_TOKEN
+ value: "${ADMIN_ACCESS_TOKEN}"
+- name: PROVIDER_PLAN
+ value: 'enterprise'
+- name: USER_LOGIN
+ value: "${ADMIN_USERNAME}"
+- name: USER_PASSWORD
+ value: "${ADMIN_PASSWORD}"
+- name: RAILS_LOG_TO_STDOUT
+ value: "true"
+- name: RAILS_LOG_LEVEL
+ value: "info"
+- name: THINKING_SPHINX_ADDRESS
+ value: "system-sphinx"
+- name: THINKING_SPHINX_PORT
+ value: "9306"
+- name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "/tmp/sphinx.conf"
+- name: EVENTS_SHARED_SECRET
+ value: "${SYSTEM_BACKEND_SHARED_SECRET}"
+- name: THREESCALE_SANDBOX_PROXY_OPENSSL_VERIFY_MODE
+ value: "VERIFY_NONE"
+- name: APICAST_BACKEND_ROOT_ENDPOINT
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+- name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+- name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+- name: SECRET_KEY_BASE
+ value: "${SYSTEM_APP_SECRET_KEY_BASE}"
+- name: AMP_RELEASE
+ value: "${AMP_RELEASE}"
+- name: SMTP_ADDRESS
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: address
+- name: SMTP_USER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: username
+- name: SMTP_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: password
+- name: SMTP_DOMAIN
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: domain
+- name: SMTP_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: port
+- name: SMTP_AUTHENTICATION
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: authentication
+- name: SMTP_OPENSSL_VERIFY_MODE
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: openssl.verify.mode
+- name: BACKEND_ROUTE
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: "system"
+message: "Login on https://${TENANT_NAME}-admin.${WILDCARD_DOMAIN} as ${ADMIN_USERNAME}/${ADMIN_PASSWORD}"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-cron
+ spec:
+ replicas: 1
+ selector:
+ name: backend-cron
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-cron
+ spec:
+ containers:
+ - args:
+ - backend-cron
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-cron
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-redis
+ spec:
+ replicas: 1
+ selector:
+ name: backend-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: backend-redis
+ spec:
+ containers:
+ - image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: backend-redis
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ volumeMounts:
+ - name: backend-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ volumes:
+ - name: backend-redis-storage
+ persistentVolumeClaim:
+ claimName: backend-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-listener
+ spec:
+ replicas: 1
+ selector:
+ name: backend-listener
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-listener
+ spec:
+ containers:
+ - args:
+ - 3scale_backend
+ - start
+ - "-e"
+ - production
+ - "-p"
+ - '3000'
+ - "-x"
+ - "/dev/stdout"
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+ - name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-listener
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3000
+ readinessProbe:
+ httpGet:
+ path: "/status"
+ port: 3000
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ selector:
+ name: backend-redis
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-listener
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: 3000
+ name: http
+ selector:
+ name: backend-listener
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-provider
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: provider
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-developer
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: developer
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-worker
+ spec:
+ replicas: 1
+ selector:
+ name: backend-worker
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-worker
+ spec:
+ containers:
+ - args:
+ - 3scale_backend_worker
+ - run
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_EVENTS_HOOK
+ value: http://system-provider:3000/master/events/import
+ - name: CONFIG_EVENTS_HOOK_SHARED_SECRET
+ value: ${SYSTEM_BACKEND_SHARED_SECRET}
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-worker
+ triggers:
+ - type: ConfigChange
+
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ ports:
+ - name: system-mysql
+ protocol: TCP
+ port: 3306
+ targetPort: 3306
+ nodePort: 0
+ selector:
+ name: 'system-mysql'
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ name: redis
+ selector:
+ name: system-redis
+
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-redis
+ spec:
+ replicas: 1
+ selector:
+ name: system-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: system-redis
+ spec:
+ containers:
+ - args:
+ image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: system-redis
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - name: system-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ volumes:
+ - name: system-redis-storage
+ persistentVolumeClaim:
+ claimName: system-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-sphinx
+ spec:
+ ports:
+ - port: 9306
+ protocol: TCP
+ targetPort: 9306
+ name: sphinx
+ selector:
+ name: system-sphinx
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sphinx
+ spec:
+ replicas: 1
+ selector:
+ name: system-sphinx
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sphinx
+ spec:
+ volumes:
+ - name: system-sphinx-database
+ emptyDir: {}
+ containers:
+ - args:
+ - rake
+ - 'openshift:thinking_sphinx:start'
+ volumeMounts:
+ - name: system-sphinx-database
+ mountPath: "/opt/system/db/sphinx"
+ env:
+ - name: RAILS_ENV
+ value: production
+ - name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+ - name: THINKING_SPHINX_ADDRESS
+ value: 0.0.0.0
+ - name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "db/sphinx/production.conf"
+ - name: THINKING_SPHINX_PID_FILE
+ value: db/sphinx/searchd.pid
+ - name: DELTA_INDEX_INTERVAL
+ value: '5'
+ - name: FULL_REINDEX_INTERVAL
+ value: '60'
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sphinx
+ livenessProbe:
+ tcpSocket:
+ port: 9306
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-memcache
+ spec:
+ ports:
+ - port: 11211
+ protocol: TCP
+ targetPort: 11211
+ name: memcache
+ selector:
+ name: system-memcache
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-memcache
+ spec:
+ replicas: 1
+ selector:
+ name: system-memcache
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-memcache
+ spec:
+ containers:
+ - args:
+ env:
+ image: 3scale-amp20/memcached:1.4.15-7
+ imagePullPolicy: IfNotPresent
+ name: memcache
+ readinessProbe:
+ exec:
+ command:
+ - "sh"
+ - "-c"
+ - "echo version | nc $HOSTNAME 11211 | grep VERSION"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 11211
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ command:
+ - "memcached"
+ - "-m"
+ - "64"
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-provider-admin-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}-admin.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-provider
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: backend-route
+ labels:
+ app: system-route
+ spec:
+ host: backend-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: backend-listener
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-developer-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-developer
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-staging
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-staging
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-staging
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: http://${APICAST_ACCESS_TOKEN}@system-provider:3000
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "sandbox"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/2"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-staging
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-staging
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-staging
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-production
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-production
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-production
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: "http://${APICAST_ACCESS_TOKEN}@system-provider:3000"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "boot"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "300"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "production"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/1"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-production
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-production
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-production
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-staging-route
+ labels:
+ app: apicast-staging
+ spec:
+ host: api-${TENANT_NAME}-apicast-staging.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-staging
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-production-route
+ labels:
+ app: apicast-production
+ spec:
+ host: api-${TENANT_NAME}-apicast-production.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-production
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-app
+ spec:
+ replicas: 1
+ selector:
+ name: system-app
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ pre:
+ failurePolicy: Retry
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:deploy
+ env: *base_env
+ volumes:
+ - system-storage
+ post:
+ failurePolicy: Abort
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:post_deploy
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-app
+ spec:
+ containers:
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ command: ['env', 'TENANT_MODE=provider', 'PORT=3000', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ name: system-provider
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: provider
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: provider
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ name: provider
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ command: ['env', 'TENANT_MODE=developer', 'PORT=3001', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ imagePullPolicy: IfNotPresent
+ name: system-developer
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: developer
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: developer
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3001
+ protocol: TCP
+ name: developer
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ readOnly: true
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-resque
+ spec:
+ replicas: 1
+ selector:
+ name: system-resque
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-resque
+ spec:
+ containers:
+ - args:
+ - 'rake'
+ - 'resque:work'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-resque
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ - 'rake'
+ - 'resque:scheduler'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-scheduler
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sidekiq
+ spec:
+ replicas: 1
+ selector:
+ name: system-sidekiq
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sidekiq
+ spec:
+ containers:
+ - args:
+ - rake
+ - sidekiq:worker
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sidekiq
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: 'system-mysql'
+ template:
+ metadata:
+ labels:
+ name: 'system-mysql'
+ spec:
+ containers:
+ - name: system-mysql
+ image: ${MYSQL_IMAGE}
+ ports:
+ - containerPort: 3306
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ readinessProbe:
+ timeoutSeconds: 5
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ exec:
+ command:
+ - /bin/sh
+ - '-i'
+ - '-c'
+ - MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3306
+ env:
+ - name: MYSQL_USER
+ value: ${MYSQL_USER}
+ - name: MYSQL_PASSWORD
+ value: ${MYSQL_PASSWORD}
+ - name: MYSQL_DATABASE
+ value: ${MYSQL_DATABASE}
+ - name: MYSQL_ROOT_PASSWORD
+ value: ${MYSQL_ROOT_PASSWORD}
+ - name: MYSQL_LOWER_CASE_TABLE_NAMES
+ value: "1"
+ volumeMounts:
+ - name: 'mysql-storage'
+ mountPath: /var/lib/mysql/data
+ imagePullPolicy: IfNotPresent
+ volumes:
+ - name: 'mysql-storage'
+ persistentVolumeClaim:
+ claimName: 'mysql-storage'
+- kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: redis-config
+ data:
+ redis.conf: |
+ protected-mode no
+
+ port 6379
+
+ timeout 0
+ tcp-keepalive 300
+
+ daemonize no
+ supervised no
+
+ loglevel notice
+
+ databases 16
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ stop-writes-on-bgsave-error yes
+
+ rdbcompression yes
+ rdbchecksum yes
+
+ dbfilename dump.rdb
+
+ slave-serve-stale-data yes
+ slave-read-only yes
+
+ repl-diskless-sync no
+ repl-disable-tcp-nodelay no
+
+ appendonly yes
+ appendfilename "appendonly.aof"
+ appendfsync everysec
+ no-appendfsync-on-rewrite no
+ auto-aof-rewrite-percentage 100
+ auto-aof-rewrite-min-size 64mb
+ aof-load-truncated yes
+
+ lua-time-limit 5000
+
+ activerehashing no
+
+ aof-rewrite-incremental-fsync yes
+ dir /var/lib/redis/data
+
+- kind: ConfigMap
+
+ apiVersion: v1
+ metadata:
+ name: smtp
+ data:
+ address: ""
+ username: ""
+ password: ""
+ domain: ""
+ port: ""
+ authentication: ""
+ openssl.verify.mode: ""
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: ADMIN_PASSWORD
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+- name: ADMIN_USERNAME
+ value: admin
+ required: true
+- name: APICAST_ACCESS_TOKEN
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+ description: "Read Only Access Token that is APIcast going to use to download its configuration."
+- name: ADMIN_ACCESS_TOKEN
+ required: false
+ generate: expression
+ from: "[a-z0-9]{16}"
+ description: "Admin Access Token with all scopes and write permissions for API access."
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. Eg. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Tenant name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
+- name: MYSQL_USER
+ displayName: MySQL User
+ description: Username for MySQL user that will be used for accessing the database.
+ value: "mysql"
+ required: true
+- name: MYSQL_PASSWORD
+ displayName: MySQL Password
+ description: Password for the MySQL user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: MYSQL_DATABASE
+ displayName: MySQL Database Name
+ description: Name of the MySQL database accessed.
+ value: "system"
+ required: true
+- name: MYSQL_ROOT_PASSWORD
+ displayName: MySQL Root password.
+ description: Password for Root user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_BACKEND_USERNAME
+ description: Internal 3scale API username for internal 3scale api auth.
+ value: "3scale_api_user"
+ required: true
+- name: SYSTEM_BACKEND_PASSWORD
+ description: Internal 3scale API password for internal 3scale api auth.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: REDIS_IMAGE
+ description: Redis image to use
+ required: true
+ value: rhscl/redis-32-rhel7:3.2-5.7
+- name: MYSQL_IMAGE
+ description: Mysql image to use
+ required: true
+ value: rhscl/mysql-56-rhel7:5.6-13.14
+- name: SYSTEM_BACKEND_SHARED_SECRET
+ description: Shared secret to import events from backend to system.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_APP_SECRET_KEY_BASE
+ description: System application secret key base
+ generate: expression
+ from: "[a-f0-9]{128}"
+ required: true
+- name: APICAST_MANAGEMENT_API
+ description: "Scope of the APIcast Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: APICAST_OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification when downloading the configuration. Can be set to true/false."
+ required: false
+ value: "false"
+- name: APICAST_RESPONSE_CODES
+ description: "Enable logging response codes in APIcast."
+ value: "true"
+ required: false
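
The new amp.yml template defines the full 3scale AMP system; the base_env YAML anchor is declared once and reused via *base_env in every system-* DeploymentConfig, so the Rails environment is maintained in one place. Of its parameters, only WILDCARD_DOMAIN has neither a default nor a generator, so a minimal instantiation (assuming the file has been saved locally as amp.yml) would be roughly:

    # The template's message field echoes the resulting admin login URL and credentials
    oc new-app -f amp.yml -p WILDCARD_DOMAIN=apps.example.com
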
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml
deleted file mode 100644
index 34f5fcbcc..000000000
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml
+++ /dev/null
@@ -1,149 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: 3scale-gateway
- annotations:
- description: "3scale API Gateway"
- iconClass: "icon-load-balancer"
- tags: "api,gateway,3scale"
-objects:
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- replicas: 2
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 25%
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- labels:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- spec:
- containers:
- - env:
- - name: THREESCALE_PORTAL_ENDPOINT
- valueFrom:
- secretKeyRef:
- name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
- key: password
- - name: THREESCALE_CONFIG_FILE
- value: ${THREESCALE_CONFIG_FILE}
- - name: RESOLVER
- value: ${RESOLVER}
- - name: APICAST_SERVICES
- value: ${APICAST_SERVICES}
- - name: APICAST_MISSING_CONFIGURATION
- value: ${MISSING_CONFIGURATION}
- - name: APICAST_LOG_LEVEL
- value: ${APICAST_LOG_LEVEL}
- - name: APICAST_PATH_ROUTING_ENABLED
- value: ${PATH_ROUTING}
- - name: APICAST_RESPONSE_CODES
- value: ${RESPONSE_CODES}
- - name: APICAST_REQUEST_LOGS
- value: ${REQUEST_LOGS}
- - name: APICAST_RELOAD_CONFIG
- value: ${APICAST_RELOAD_CONFIG}
- image: ${THREESCALE_GATEWAY_IMAGE}
- imagePullPolicy: Always
- name: ${THREESCALE_GATEWAY_NAME}
- livenessProbe:
- httpGet:
- path: /status/live
- port: 8090
- initialDelaySeconds: 10
- timeoutSeconds: 1
- readinessProbe:
- httpGet:
- path: /status/ready
- port: 8090
- initialDelaySeconds: 15
- timeoutSeconds: 1
- ports:
- - containerPort: 8080
- protocol: TCP
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- securityContext: {}
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
- status: {}
-- apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- ports:
- - name: 8080-tcp
- port: 8080
- protocol: TCP
- targetPort: 8080
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- sessionAffinity: None
- type: ClusterIP
- status:
- loadBalancer: {}
-parameters:
-- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
- value: threescale-portal-endpoint-secret
- name: THREESCALE_PORTAL_ENDPOINT_SECRET
- required: true
-- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
- value:
- name: THREESCALE_CONFIG_FILE
- required: false
-- description: "Name for the 3scale API Gateway"
- value: threescalegw
- name: THREESCALE_GATEWAY_NAME
- required: true
-- description: "Docker image to use."
- value: 'rhamp10/apicast-gateway:1.0.0-4'
- name: THREESCALE_GATEWAY_IMAGE
- required: true
-- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
- value:
- name: RESOLVER
- required: false
-- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
- value:
- name: APICAST_SERVICES
- required: false
-- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
- value: exit
- required: false
- name: MISSING_CONFIGURATION
-- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
- name: APICAST_LOG_LEVEL
- required: false
-- description: "Enable path routing. Experimental feature."
- name: PATH_ROUTING
- required: false
- value: "false"
-- description: "Enable traffic logging to 3scale. Includes whole request and response."
- value: "false"
- name: REQUEST_LOGS
- required: false
-- description: "Enable logging response codes to 3scale."
- value: "false"
- name: RESPONSE_CODES
- required: false
-- description: "Reload config on every request"
- value: "false"
- name: APICAST_RELOAD_CONFIG
- required: false
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast.yml b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast.yml
new file mode 100644
index 000000000..8e8051c0b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast.yml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: "${APICAST_NAME}"
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: "${CONFIGURATION_URL_SECRET}"
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: "${CONFIGURATION_FILE_PATH}"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "${DEPLOYMENT_ENVIRONMENT}"
+ - name: RESOLVER
+ value: "${RESOLVER}"
+ - name: APICAST_SERVICES
+ value: "${SERVICES_LIST}"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "${CONFIGURATION_LOADER}"
+ - name: APICAST_LOG_LEVEL
+ value: "${LOG_LEVEL}"
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: "${PATH_ROUTING}"
+ - name: APICAST_RESPONSE_CODES
+ value: "${RESPONSE_CODES}"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "${CONFIGURATION_CACHE}"
+ - name: REDIS_URL
+ value: "${REDIS_URL}"
+ - name: APICAST_MANAGEMENT_API
+ value: "${MANAGEMENT_API}"
+ - name: OPENSSL_VERIFY
+ value: "${OPENSSL_VERIFY}"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: "${APICAST_NAME}"
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: management
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - name: proxy
+ containerPort: 8080
+ protocol: TCP
+ - name: management
+ containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ ports:
+ - name: proxy
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: apicast-configuration-url-secret
+ name: CONFIGURATION_URL_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: CONFIGURATION_FILE_PATH
+ required: false
+- description: "Deployment environment. Can be sandbox or production."
+ value: production
+ name: DEPLOYMENT_ENVIRONMENT
+ required: true
+- description: "Name for the 3scale API Gateway"
+ value: apicast
+ name: APICAST_NAME
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: SERVICES_LIST
+ required: false
+- name: CONFIGURATION_LOADER
+ description: "When to load configuration. If on gateway start or incoming request. Allowed values are: lazy, boot."
+ value: boot
+ required: false
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- name: CONFIGURATION_CACHE
+ description: "For how long to cache the downloaded configuration in seconds. Can be left empty, 0 or greater than 60."
+ value: ""
+ required: false
+- description: "Redis URL. Required for OAuth2 integration. ex: redis://PASSWORD@127.0.0.1:6379/0"
+ name: REDIS_URL
+ required: false
+- name: MANAGEMENT_API
+ description: "Scope of the Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification. Can be set to true/false."
+ required: true
+ value: "false"
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json
index eb3d296be..6d987ee33 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -510,7 +513,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
index da2454d2e..fb2ef206e 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -484,7 +487,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..7ffb25e14 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -147,6 +150,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
@@ -472,7 +478,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
index 7a285dba8..d787e376b 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -147,6 +150,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
@@ -446,7 +452,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..a2070207b 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -148,7 +151,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -479,7 +482,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
index 7bee85ddd..0d33c6e0e 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -148,7 +151,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -453,7 +456,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dotnet-example.json
index a09d71a00..af46579c8 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dotnet-example.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dotnet-example",
"annotations": {
- "openshift.io/display-name": ".NET Core",
+ "openshift.io/display-name": ".NET Core Example",
"description": "An example .NET Core application.",
"tags": "quickstart,dotnet,.net",
"iconClass": "icon-dotnet",
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dotnet-pgsql-persistent.json
index fa31f7f61..a2b59c2d3 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dotnet-pgsql-persistent.json
@@ -19,6 +19,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -209,7 +220,12 @@
"env": [
{
"name": "ConnectionString",
- "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
}
],
"resources": {
@@ -373,7 +389,12 @@
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/httpd.json
new file mode 100644
index 000000000..ac671cc06
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/httpd.json
@@ -0,0 +1,274 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "httpd-example",
+ "annotations": {
+ "openshift.io/display-name": "Httpd",
+ "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "tags": "quickstart,httpd",
+ "iconClass": "icon-apache",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "labels": {
+ "template": "httpd-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "httpd:2.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "httpd-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "httpd-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": [
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "httpd-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/httpd-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the httpd service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ]
+}
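In the new httpd template, the BuildConfig's output ImageStreamTag and the DeploymentConfig's ImageChange trigger point at the same tag, so each successful build rolls out a new deployment; the container's image field is left as a single space because the trigger injects the resolved image reference. A trimmed YAML sketch of that wiring (assuming ${NAME} resolves to httpd-example; build source and strategy omitted):

apiVersion: v1
kind: BuildConfig
metadata:
  name: httpd-example
spec:
  output:
    to:
      kind: ImageStreamTag
      name: httpd-example:latest    # builds push here...
---
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: httpd-example
spec:
  triggers:
  - type: ImageChange
    imageChangeParams:
      automatic: true
      containerNames:
      - httpd-example               # ...and this trigger watches the same tag,
      from:                         # updating the named container's image on
        kind: ImageStreamTag        # every new build
        name: httpd-example:latest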
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
index b0aef3cfc..ce96684a9 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
@@ -110,6 +113,10 @@
"value": "true"
},
{
+ "name": "OPENSHIFT_JENKINS_JVM_ARCH",
+ "value": "${JVM_ARCH}"
+ },
+ {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -255,6 +262,12 @@
"value": "true"
},
{
+ "name": "JVM_ARCH",
+ "displayName": "Jenkins JVM Architecture",
+ "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
+ "value": "i386"
+ },
+ {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
index a542de219..34b2b920b 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
@@ -127,6 +130,10 @@
"value": "true"
},
{
+ "name": "OPENSHIFT_JENKINS_JVM_ARCH",
+ "value": "${JVM_ARCH}"
+ },
+ {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -272,6 +279,12 @@
"value": "true"
},
{
+ "name": "JVM_ARCH",
+ "displayName": "Jenkins JVM Architecture",
+ "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
+ "value": "i386"
+ },
+ {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..a9c365361 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
@@ -154,7 +157,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -491,7 +494,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..53a6147d5 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
@@ -154,7 +157,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -467,7 +470,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/pvc.yml b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/pvc.yml
new file mode 100644
index 000000000..0bbb8e625
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/pvc.yml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-pvc"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
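The new pvc.yml template only creates the claims; other templates in the suite are presumably the ones that mount them. For reference, a claim such as system-storage (ReadWriteMany, so it can be shared by several pods) is consumed from a pod spec as below; the consumer, image, and mount path are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: system-app
spec:
  containers:
  - name: app
    image: registry.example.com/system:latest    # illustrative image
    volumeMounts:
    - name: system-storage
      mountPath: /opt/system/storage              # illustrative path
  volumes:
  - name: system-storage
    persistentVolumeClaim:
      claimName: system-storage                   # matches the claim created above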
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json
index b400cfdb3..f07a43071 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -526,7 +533,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
index fa67412ff..a7992c988 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -500,7 +507,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/wildcard.yml b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/wildcard.yml
new file mode 100644
index 000000000..00dedecd5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/wildcard.yml
@@ -0,0 +1,158 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-apicast-wildcard-router"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-router
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-router
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-router
+ spec:
+ volumes:
+ - name: apicast-router-config
+ configMap:
+ name: apicast-router-config
+ items:
+ - key: router.conf
+ path: router.conf
+ containers:
+ - env:
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-router
+ command: ['bin/apicast']
+ livenessProbe:
+ tcpSocket:
+ port: router
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 30
+ volumeMounts:
+ - name: apicast-router-config
+ mountPath: /opt/app-root/src/sites.d/
+ readOnly: true
+ ports:
+ - containerPort: 8082
+ name: router
+ protocol: TCP
+ - containerPort: 8090
+ name: management
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-router
+ spec:
+ ports:
+ - name: router
+ port: 80
+ protocol: TCP
+ targetPort: router
+ selector:
+ deploymentconfig: apicast-router
+
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: apicast-router-config
+ data:
+ router.conf: |-
+ upstream wildcard {
+ server 0.0.0.1:1;
+
+ balancer_by_lua_block {
+ local round_robin = require 'resty.balancer.round_robin'
+ local balancer = round_robin.new()
+ local peers = balancer:peers(ngx.ctx.apicast)
+
+ local peer, err = balancer:set_peer(peers)
+
+ if not peer then
+ ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
+ ngx.log(ngx.ERR, "failed to set current backend peer: ", err)
+ ngx.exit(ngx.status)
+ end
+ }
+
+ keepalive 1024;
+ }
+
+ server {
+ listen 8082;
+ server_name ~-(?<apicast>apicast-(staging|production))\.;
+ access_log /dev/stdout combined;
+
+ location / {
+ access_by_lua_block {
+ local resolver = require('resty.resolver'):instance()
+ local servers = resolver:get_servers(ngx.var.apicast, { port = 8080 })
+
+ if #servers == 0 then
+ ngx.status = ngx.HTTP_BAD_GATEWAY
+ ngx.exit(ngx.HTTP_OK)
+ end
+
+ ngx.ctx.apicast = servers
+ }
+ proxy_http_version 1.1;
+ proxy_pass $scheme://wildcard;
+ proxy_set_header Host $host;
+ proxy_set_header Connection "";
+ }
+ }
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: apicast-wildcard-router
+ labels:
+ app: apicast-wildcard-router
+ spec:
+ host: apicast-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-router
+ port:
+ targetPort: router
+ wildcardPolicy: Subdomain
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. Eg. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Domain name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
index 049f3f884..0bb56452b 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
@@ -12,7 +12,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-webserver30-tomcat7-openshift"
+ "name": "jboss-webserver30-tomcat7-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
@@ -23,10 +26,11 @@
"description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
- "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "supports": "tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.1"
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
}
},
{
@@ -35,10 +39,23 @@
"description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
- "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
+ "supports": "tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.3"
}
}
]
@@ -48,7 +65,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-webserver30-tomcat8-openshift"
+ "name": "jboss-webserver30-tomcat8-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
@@ -59,10 +79,11 @@
"description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
- "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "supports": "tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.1"
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
}
},
{
@@ -71,10 +92,23 @@
"description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
- "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
+ "supports": "tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.3"
}
}
]
@@ -84,7 +118,66 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-eap64-openshift"
+ "name": "jboss-webserver31-tomcat7-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Web Server 3.1 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports": "tomcat7:3.1,tomcat:7,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver31-tomcat8-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Web Server 3.1 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports": "tomcat8:3.1,tomcat:8,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap64-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
@@ -95,11 +188,12 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.1",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.1"
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
}
},
{
@@ -108,11 +202,12 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.2",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
}
},
{
@@ -121,11 +216,12 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.3",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.3",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.3"
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
}
},
{
@@ -134,11 +230,25 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.4",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.5",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.5"
}
}
]
@@ -148,7 +258,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-eap70-openshift"
+ "name": "jboss-eap70-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
@@ -159,11 +272,12 @@
"description": "JBoss EAP 7.0 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
+ "supports": "eap:7.0,javaee:7,java:8,xpaas:1.3",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "7.0.0.GA",
- "version": "1.3"
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
}
},
{
@@ -172,11 +286,25 @@
"description": "JBoss EAP 7.0 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:7.0,javaee:7,java:8,xpaas:1.4",
+ "supports": "eap:7.0,javaee:7,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "7.0.0.GA",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.5",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.5"
}
}
]
@@ -186,7 +314,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-decisionserver62-openshift"
+ "name": "jboss-decisionserver62-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift",
@@ -196,12 +327,13 @@
"annotations": {
"description": "Red Hat JBoss BRMS 6.2 decision server S2I images.",
"iconClass": "icon-jboss",
- "tags": "builder,decisionserver,java,xpaas",
- "supports":"decisionserver:6.2,java:8,xpaas:1.2",
+ "tags": "builder,decisionserver,xpaas",
+ "supports": "decisionserver:6.2,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.2",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
}
}
]
@@ -211,7 +343,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-decisionserver63-openshift"
+ "name": "jboss-decisionserver63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift",
@@ -221,12 +356,51 @@
"annotations": {
"description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
"iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,xpaas",
+ "supports": "decisionserver:6.3,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
+ "iconClass": "icon-jboss",
"tags": "builder,decisionserver,java,xpaas",
- "supports":"decisionserver:6.3,java:8,xpaas:1.3",
+ "supports":"decisionserver:6.3,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.3",
- "version": "1.3"
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.4 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.4,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.0"
}
}
]
@@ -236,7 +410,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-processserver63-openshift"
+ "name": "jboss-processserver63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift",
@@ -246,12 +423,26 @@
"annotations": {
"description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
"iconClass": "icon-jboss",
+ "tags": "builder,processserver,xpaas",
+ "supports": "processserver:6.3,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
"tags": "builder,processserver,java,xpaas",
- "supports":"processserver:6.3,java:8,xpaas:1.3",
+ "supports":"processserver:6.3,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "processserver/library",
"sampleRef": "1.3",
- "version": "1.3"
+ "version": "1.4"
}
}
]
@@ -261,7 +452,35 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-datagrid65-openshift"
+ "name": "jboss-processserver64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.4 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,java,xpaas",
+ "supports":"processserver:6.4,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift",
@@ -271,9 +490,56 @@
"annotations": {
"description": "JBoss Data Grid 6.5 S2I images.",
"iconClass": "icon-jboss",
- "tags": "datagrid,java,jboss,xpaas",
- "supports":"datagrid:6.5,java:8,xpaas:1.2",
- "version": "1.2"
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:6.5,xpaas:1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:6.5,xpaas:1.4",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports":"datagrid:6.5,xpaas:1.4",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-client-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 Client Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
}
}
]
@@ -283,7 +549,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-datavirt63-openshift"
+ "name": "jboss-datavirt63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
@@ -293,9 +562,56 @@
"annotations": {
"description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
"iconClass": "icon-jboss",
- "tags": "datavirt,java,jboss,xpaas",
- "supports":"datavirt:6.3,java:8,xpaas:1.4",
- "version": "1.0"
+ "tags": "datavirt,jboss,xpaas",
+ "supports": "datavirt:6.3,xpaas:1.4",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports": "datavirt:6.3,xpaas:1.4",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports":"datavirt:6.3,xpaas:1.4",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datavirt63-driver-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP"
}
}
]
@@ -305,7 +621,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-amq-62"
+ "name": "jboss-amq-62",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
@@ -316,8 +635,9 @@
"description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.1",
- "version": "1.1"
+ "supports": "amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
}
},
{
@@ -326,8 +646,9 @@
"description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.2",
- "version": "1.2"
+ "supports": "amq:6.2,messaging,xpaas:1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
}
},
{
@@ -336,8 +657,45 @@
"description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.3",
- "version": "1.3"
+ "supports": "amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.4",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-63",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss A-MQ 6.3 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.3,messaging,xpaas:1.0",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
}
}
]
@@ -348,8 +706,9 @@
"apiVersion": "v1",
"metadata": {
"name": "redhat-sso70-openshift",
- "annotations": {
- "description": "Red Hat SSO 7.0"
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
}
},
"spec": {
@@ -361,8 +720,20 @@
"description": "Red Hat SSO 7.0",
"iconClass": "icon-jboss",
"tags": "sso,keycloak,redhat",
- "supports":"sso:7.0,xpaas:1.3",
- "version": "1.3"
+ "supports": "sso:7.0,xpaas:1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.0,xpaas:1.4",
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
}
}
]
@@ -372,7 +743,48 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "redhat-openjdk18-openshift"
+ "name": "redhat-sso71-openshift",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso71-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.1,xpaas:1.4",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.1,xpaas:1.4",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-openjdk18-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
@@ -384,11 +796,24 @@
"description": "Build and run Java applications using Maven and OpenJDK 8.",
"iconClass": "icon-jboss",
"tags": "builder,java,xpaas,openjdk",
- "supports":"java:8,xpaas:1.0",
+ "supports": "java:8,xpaas:1.0",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
"sampleContextDir": "undertow-servlet",
"version": "1.0"
}
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports": "java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.1"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-basic.json
index ab35afead..af20b373a 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-basic.json
@@ -6,46 +6,54 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral, no SSL)"
},
"name": "amq62-basic"
},
"labels": {
"template": "amq62-basic",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -53,6 +61,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,18 +69,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -171,7 +183,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire port."
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
}
}
},
@@ -202,7 +215,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
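The amq62-basic change above also adds a service.alpha.openshift.io/dependencies annotation to the OpenWire service. Its value is a JSON-encoded list of related services, which appears to be consumed by the web console to group the broker's protocol services together in the project overview (an assumption; the diff itself does not say). A minimal sketch, with illustrative names in place of the ${APPLICATION_NAME} substitutions:

apiVersion: v1
kind: Service
metadata:
  name: broker-amq-tcp
  annotations:
    # JSON list of services to associate with this one
    service.alpha.openshift.io/dependencies: '[{"name": "broker-amq-amqp", "kind": "Service"}, {"name": "broker-amq-stomp", "kind": "Service"}]'
spec:
  ports:
  - port: 61616                     # OpenWire
    targetPort: 61616
  selector:
    deploymentConfig: broker-amq    # illustrative selector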
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent-ssl.json
index c12f06dec..5acdbfabf 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent-ssl.json
@@ -6,58 +6,68 @@
"description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent with SSL)"
},
"name": "amq62-persistent-ssl"
},
"labels": {
"template": "amq62-persistent-ssl",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -65,6 +75,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -72,48 +83,56 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
"required": true
},
{
+ "displayName": "Trust Store Filename",
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
"value": "broker.ts",
"required": true
},
{
+ "displayName": "Trust Store Password",
"description": "SSL trust store password",
"name": "AMQ_TRUSTSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Keystore Filename",
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
"value": "broker.ks",
"required": true
},
{
+ "displayName": "A-MQ Keystore Password",
"description": "Password for accessing SSL keystore",
"name": "AMQ_KEYSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -309,7 +328,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire (SSL) port."
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
}
}
},
@@ -340,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent.json
index 897ce0395..b8089cd6d 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-persistent.json
@@ -6,58 +6,68 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent, no SSL)"
},
"name": "amq62-persistent"
},
"labels": {
"template": "amq62-persistent",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -65,6 +75,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -72,18 +83,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -183,7 +197,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire port."
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
}
}
},
@@ -214,7 +229,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
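As in the other templates, the DeploymentConfig's ImageChange trigger now tracks jboss-amq-62:1.4 rather than 1.3, so rollouts only proceed once that tag exists in ${IMAGE_STREAM_NAMESPACE}. A quick check, assuming the default "openshift" namespace where the xPaaS image streams are normally installed:

    # Confirm the 1.4 tag referenced by the updated trigger is present.
    oc get imagestreamtag jboss-amq-62:1.4 -n openshift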
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-ssl.json
index 97d110286..b52fdbfb0 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq62-ssl.json
@@ -6,46 +6,54 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral with SSL)"
},
"name": "amq62-ssl"
},
"labels": {
"template": "amq62-ssl",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -53,6 +61,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,48 +69,56 @@
"required": false
},
{
+ "displayName": "Secret Name",
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
"required": true
},
{
+ "displayName": "Trust Store Filename",
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
"value": "broker.ts",
"required": true
},
{
+ "displayName": "Trust Store Password",
"description": "SSL trust store password",
"name": "AMQ_TRUSTSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Keystore Filename",
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
"value": "broker.ks",
"required": true
},
{
+ "displayName": "A-MQ Keystore Password",
"description": "Password for accessing SSL keystore",
"name": "AMQ_KEYSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -297,7 +314,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire (SSL) port."
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
}
}
},
@@ -328,7 +346,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
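The message added to the SSL variants asks the user to create the "amq-service-account" service account and a secret holding the trust store and key store named by AMQ_TRUSTSTORE and AMQ_KEYSTORE. A minimal sketch of that preparation, using throwaway self-signed stores purely for illustration (the filenames and the amq-app-secret name match the template defaults; real deployments would use properly issued certificates):

    # Illustrative only: build broker.ks/broker.ts and publish them as the secret
    # that the SSL templates mount into the broker pod.
    keytool -genkeypair -alias broker -keyalg RSA -keystore broker.ks \
      -storepass changeit -keypass changeit -dname "CN=broker"
    keytool -exportcert -alias broker -keystore broker.ks -storepass changeit -file broker.crt
    keytool -importcert -alias broker -keystore broker.ts -storepass changeit -file broker.crt -noprompt

    oc create serviceaccount amq-service-account
    oc create secret generic amq-app-secret --from-file=broker.ks --from-file=broker.ts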
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-basic.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-basic.json
new file mode 100644
index 000000000..d29f6a300
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-basic.json
@@ -0,0 +1,334 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral, no SSL)"
+ },
+ "name": "amq63-basic"
+ },
+ "labels": {
+ "template": "amq63-basic",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
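amq63-basic.json is a new file rather than an edit, so once openshift_examples syncs it into the cluster it can be instantiated like any other xPaaS template. A minimal sketch, registering the template in the current project first (the parameter values are placeholders, not defaults from this change):

    # Load the template and create a broker from it with a couple of sample parameters.
    oc create -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-basic.json
    oc new-app --template=amq63-basic \
      -p APPLICATION_NAME=broker \
      -p MQ_PROTOCOL=openwire \
      -p MQ_QUEUES=orders,payments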
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent-ssl.json
new file mode 100644
index 000000000..47f6396dd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent-ssl.json
@@ -0,0 +1,569 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent with SSL)"
+ },
+ "name": "amq63-persistent-ssl"
+ },
+ "labels": {
+ "template": "amq63-persistent-ssl",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Filename",
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Password",
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Filename",
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Password",
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ },
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
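AMQ_MESH_DISCOVERY_TYPE defaults to "kube", and the parameter description spells out the extra permission that mode needs: the pod's service account must hold the view role. The command below follows the one quoted in the description ("myproject" is a placeholder namespace); note that this particular template runs its pod as amq-service-account, so the equivalent grant for that account is presumably what is actually required:

    # As given in the parameter description, for the default service account...
    oc policy add-role-to-user view system:serviceaccount:myproject:default
    # ...and the same grant for the service account this template actually uses.
    oc policy add-role-to-user view system:serviceaccount:myproject:amq-service-account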
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent.json
new file mode 100644
index 000000000..4b64203c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-persistent.json
@@ -0,0 +1,386 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent, no SSL)"
+ },
+ "name": "amq63-persistent"
+ },
+ "labels": {
+ "template": "amq63-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
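The persistent variants differ from amq63-basic mainly in the AMQ_SPLIT parameter, the /opt/amq/data volume mount, and the ${APPLICATION_NAME}-amq-claim PersistentVolumeClaim requesting ReadWriteMany storage sized by VOLUME_CAPACITY. A quick post-deployment sanity check (the claim name assumes the default APPLICATION_NAME of "broker"):

    # Verify the claim created by the template bound to a ReadWriteMany-capable volume.
    oc get pvc broker-amq-claim
    oc describe pvc broker-amq-claim | grep -i 'access modes'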
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-ssl.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-ssl.json
new file mode 100644
index 000000000..20ad50016
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/amq63-ssl.json
@@ -0,0 +1,521 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral with SSL)"
+ },
+ "name": "amq63-ssl"
+ },
+ "labels": {
+ "template": "amq63-ssl",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Filename",
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Password",
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Filename",
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Password",
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
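All of the new A-MQ 6.3 templates wire the container's readiness probe to /opt/amq/bin/readinessProbe.sh inside the image. If a broker pod never reports Ready, the same script can be run by hand to inspect its output (the pod name below is a placeholder; the script path comes straight from the template):

    # Run the template's readiness check manually inside a broker pod.
    oc exec broker-amq-1-abcde -- /bin/bash -c /opt/amq/bin/readinessProbe.sh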
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-basic.json
index 56e76016f..32433bef0 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-basic.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-basic.json
@@ -6,76 +6,103 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 (Ephemeral, no https)"
},
"name": "datagrid65-basic"
},
"labels": {
"template": "datagrid65-basic",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -200,7 +227,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -267,9 +294,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -302,6 +334,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
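For reference, a minimal sketch (not part of the diff) of how the parameters added above, DATAVIRT_CACHE_NAMES and CACHE_TYPE_DEFAULT, might be supplied when instantiating one of these datagrid65 templates, and how to confirm the renamed Hot Rod container ports (hotrod-internal on 11222, hotrod on 11333). The template path, cache names and label selector are illustrative assumptions.

    # Illustrative only: instantiate the patched template with the new parameters.
    oc process -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-basic.json \
        -p APPLICATION_NAME=datagrid-app \
        -p CACHE_NAMES=orders,customers \
        -p DATAVIRT_CACHE_NAMES=views \
        -p CACHE_TYPE_DEFAULT=replicated \
        | oc create -f -

    # Print the container port names/numbers; assumes the pods carry the template's "application" label.
    oc get pods -l application=datagrid-app \
        -o jsonpath='{range .items[0].spec.containers[0].ports[*]}{.name}{" "}{.containerPort}{"\n"}{end}'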
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-https.json
index 639ac2e11..e6f020400 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-https.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-https.json
@@ -6,130 +6,166 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 (Ephemeral with https)"
},
"name": "datagrid65-https"
},
"labels": {
"template": "datagrid65-https",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -301,7 +337,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -386,9 +422,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -437,6 +478,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
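The new "message" block above tells the user to create the "datagrid-service-account" service account plus keystore secrets before deploying. A minimal sketch of that preparation, under stated assumptions: passwords, alias names and the self-signed certificate are placeholders, and the single secret name matches the template defaults for HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET (datagrid-app-secret).

    # Placeholders throughout; not part of the template itself.
    keytool -genkeypair -alias datagrid-https -keyalg RSA -keystore keystore.jks \
        -storepass changeit -keypass changeit -dname "CN=datagrid-app.example.com"
    keytool -genseckey -alias jgroups -keyalg AES -keysize 128 -storetype JCEKS \
        -keystore jgroups.jceks -storepass changeit -keypass changeit

    oc create serviceaccount datagrid-service-account
    # The view role is commonly needed so KUBE_PING clustering can list pods in the project.
    oc policy add-role-to-user view -z datagrid-service-account
    oc create secret generic datagrid-app-secret \
        --from-file=keystore.jks --from-file=jgroups.jceks
    oc secrets link datagrid-service-account datagrid-app-secret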
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql-persistent.json
index 22ca3f0a0..ff57a7936 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql-persistent.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and MySQL applications with persistent storage.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + MySQL (Persistent with https)"
},
"name": "datagrid65-mysql-persistent"
},
"labels": {
"template": "datagrid65-mysql-persistent",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using MySQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:/jboss/datasources/mysql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,117 +111,158 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -230,7 +286,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -254,7 +311,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +336,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Memcached service for clustered applications."
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -302,7 +361,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Hot Rod service for clustered applications."
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -397,7 +457,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -482,9 +542,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -585,6 +650,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -669,7 +742,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
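The hunk above replaces the floating mysql:latest ImageStreamTag with mysql:${MYSQL_IMAGE_STREAM_TAG} (default 5.7). A short sketch, assuming the shared ImageStreams live in the openshift namespace, of checking which MySQL tags are available and pinning one explicitly; parameter values are examples.

    # List MySQL tags in the assumed shared namespace.
    oc get imagestreamtags -n openshift | grep '^mysql:'

    # Pin the database image and storage size at processing time.
    oc process -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql-persistent.json \
        -p MYSQL_IMAGE_STREAM_TAG=5.7 -p VOLUME_CAPACITY=1Gi | oc create -f -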
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql.json
index e1a585d24..44902de25 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-mysql.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and MySQL applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + MySQL (Ephemeral with https)"
},
"name": "datagrid65-mysql"
},
"labels": {
"template": "datagrid65-mysql",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using MySQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:/jboss/datasources/mysql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,111 +111,151 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -224,7 +279,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -248,7 +304,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +329,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Memcached service for clustered applications."
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -296,7 +354,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Hot Rod service for clustered applications."
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -391,7 +450,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -476,9 +535,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -579,6 +643,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -663,7 +735,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
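The services above now carry a service.alpha.openshift.io/dependencies annotation pointing at the ${APPLICATION_NAME}-mysql service, which the web console uses to group the database with the application. A quick, illustrative way to confirm the annotation after instantiation; the service name assumes the default APPLICATION_NAME.

    # Read the dependencies annotation off the HTTP service; dots in the key are escaped for jsonpath.
    oc get svc datagrid-app \
        -o jsonpath='{.metadata.annotations.service\.alpha\.openshift\.io/dependencies}{"\n"}'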
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql-persistent.json
index 12720eb19..6b90e1370 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql-persistent.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and PostgreSQL applications with persistent storage.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + PostgreSQL (Persistent with https)"
},
"name": "datagrid65-postgresql-persistent"
},
"labels": {
"template": "datagrid65-postgresql-persistent",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using PostgreSQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/postgresql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,102 +111,140 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -215,7 +268,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -239,7 +293,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -382,7 +437,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -467,9 +522,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -570,6 +630,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -654,7 +722,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
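As with the MySQL variants, postgresql:latest is replaced here by postgresql:${POSTGRESQL_IMAGE_STREAM_TAG} (default 9.5). A minimal sketch of supplying that tag together with the persistent volume size; the values are illustrative, and the PVC label lookup assumes the claim carries the template's "application" label.

    oc process -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql-persistent.json \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5 -p VOLUME_CAPACITY=1Gi | oc create -f -

    # The resulting claim should reflect VOLUME_CAPACITY.
    oc get pvc -l application=datagrid-app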
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql.json
index da8015fb0..ae36376db 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datagrid65-postgresql.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and PostgreSQL applications built using.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + PostgreSQL (Ephemeral with https)"
},
"name": "datagrid65-postgresql"
},
"labels": {
"template": "datagrid65-postgresql",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using PostgreSQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/postgresql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,96 +111,133 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configurd for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -209,7 +261,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -233,7 +286,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -376,7 +430,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -461,9 +515,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -564,6 +623,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -648,7 +715,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
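Every template in this batch bumps its template and xpaas labels to 1.4.0, which also makes it easy to find or clean up everything a given instantiation created. A small illustrative query; the label values come from the diff, the resource set returned depends on the project.

    # Everything created from this template:
    oc get all -l template=datagrid65-postgresql
    # Or everything stamped with the new xpaas release label:
    oc get all -l xpaas=1.4.0
    # Deleting by the template label removes the instantiated objects in one go.
    oc delete all -l template=datagrid65-postgresql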
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json
index 7d64dac98..ea2f13742 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (no SSL)"
},
"name": "datavirt63-basic-s2i"
},
@@ -60,6 +61,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -133,6 +135,27 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -215,7 +238,22 @@
"uri": "${SOURCE_REPOSITORY_URL}",
"ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir": "${CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
},
"strategy": {
"type": "Source",
@@ -224,8 +262,26 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
- }
+ "name": "jboss-datavirt63-openshift:1.2"
+ },
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ]
}
},
"output": {
@@ -252,6 +308,15 @@
"imageChange": {}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
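The datavirt63-basic-s2i changes above add VDB_DIRS, MAVEN_MIRROR_URL and ARTIFACT_DIR as S2I build inputs and pull /extensions from jboss-datagrid65-client-openshift:1.0 into the build context. A hedged sketch of instantiating the template with those parameters; the Git repository, mirror URL and application name are placeholders, not defaults from the template.

    oc new-app --template=datavirt63-basic-s2i \
        -p APPLICATION_NAME=datavirt-app \
        -p SOURCE_REPOSITORY_URL=https://github.com/example/teiid-vdbs \
        -p VDB_DIRS=vdbs \
        -p ARTIFACT_DIR=target \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/
    # Watch the S2I build, which now also copies the Data Grid client extensions.
    oc logs -f bc/datavirt-app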
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json
index 1e7c03b99..22b579ecc 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (with SSL and Extensions)"
},
"name": "datavirt63-extensions-support-s2i"
},
@@ -102,6 +103,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -238,6 +240,27 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -446,6 +469,19 @@
{
"from": {
"kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ },
+ {
+ "from": {
+ "kind": "ImageStreamTag",
"name": "${APPLICATION_NAME}-ext:latest"
},
"paths": [
@@ -464,12 +500,24 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
+ "name": "jboss-datavirt63-openshift:1.2"
},
"env": [
{
"name": "CUSTOM_INSTALL_DIRECTORIES",
"value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
]
}
@@ -507,6 +555,15 @@
}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
@@ -713,7 +770,7 @@
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE",
- "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ "value": "${HTTPS_KEYSTORE}"
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json
index 07f926ff3..9392c20a6 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (with SSL)"
},
"name": "datavirt63-secure-s2i"
},
@@ -74,6 +75,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -210,6 +212,168 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "displayName": "SSO Server URL",
+ "name": "SSO_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "SSO Server Service URL",
+ "name": "SSO_SERVICE_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "displayName": "SSO Realm",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "displayName": "SSO Username",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "displayName": "SSO User's Password",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Realm Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
+ "displayName": "SSO Realm Public Key",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type. true or false",
+ "displayName": "SSO Bearer Only",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "displayName": "SSO SAML Keystore Secret",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "displayName": "SSO SAML Keystore File",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "displayName": "SSO SAML Certificate Alias",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "displayName": "SSO SAML Keystore Password",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "displayName": "SSO Client Secret",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Enable CORS for SSO applications. true or false",
+ "name": "SSO_ENABLE_CORS",
+ "displayName": "SSO Enable CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "displayName": "SSO SAML Logout Page",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "displayName": "SSO Disable SSL Certificate Validation",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "displayName": "SSO Truststore File",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "displayName": "SSO Truststore Password",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "displayName": "SSO Truststore Secret",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of deployments that shoulds be exploded and enabled for SSO OpenIDConnect via auth-method",
+ "name": "SSO_OPENIDCONNECT_DEPLOYMENTS",
+ "displayName": "SSO OpenIDConnect Deployments",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of deployments that shoulds be exploded and enabled for SSO SAML via auth-method",
+ "name": "SSO_SAML_DEPLOYMENTS",
+ "displayName": "SSO SAML Deployments",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -354,7 +518,22 @@
"uri": "${SOURCE_REPOSITORY_URL}",
"ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir": "${CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
},
"strategy": {
"type": "Source",
@@ -363,8 +542,26 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
- }
+ "name": "jboss-datavirt63-openshift:1.2"
+ },
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ]
}
},
"output": {
@@ -391,6 +588,15 @@
"imageChange": {}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
@@ -597,7 +803,7 @@
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE",
- "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ "value": "${HTTPS_KEYSTORE}"
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
@@ -610,6 +816,98 @@
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
"value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_OPENIDCONNECT_DEPLOYMENTS",
+ "value": "${SSO_OPENIDCONNECT_DEPLOYMENTS}"
+ },
+ {
+ "name": "SSO_SAML_DEPLOYMENTS",
+ "value": "${SSO_SAML_DEPLOYMENTS}"
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-amq-s2i.json
index 754a3b4c0..1989036fa 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-amq-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server + A-MQ (with https)"
},
"name": "decisionserver62-amq-s2i"
},
@@ -14,20 +15,24 @@
"template": "decisionserver62-amq-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,102 +40,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -138,6 +160,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,18 +168,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +198,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -198,7 +226,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -222,7 +251,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -564,7 +594,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-basic-s2i.json
index 8be4ac90b..25b2c162c 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-basic-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server (no https)"
},
"name": "decisionserver62-basic-s2i"
},
@@ -14,20 +15,24 @@
"template": "decisionserver62-basic-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,48 +40,56 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -84,6 +97,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -91,6 +105,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -98,6 +113,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-https-s2i.json
index bf9047599..85605d642 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver62-https-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server HTTPS applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server (with https)"
},
"name": "decisionserver62-https-s2i"
},
@@ -14,32 +15,38 @@
"template": "decisionserver62-https-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,78 +54,91 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -126,6 +146,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +154,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +162,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-amq-s2i.json
index 51e667e02..ecea54d94 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-amq-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server + A-MQ (with https)"
},
"name": "decisionserver63-amq-s2i"
},
"labels": {
"template": "decisionserver63-amq-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,108 +40,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -144,6 +167,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -151,18 +175,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,10 +205,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -204,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -228,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -334,13 +378,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
@@ -574,7 +626,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-basic-s2i.json
index c5f0d006a..d655dbe94 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-basic-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server (no https)"
},
"name": "decisionserver63-basic-s2i"
},
"labels": {
"template": "decisionserver63-basic-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,48 +40,56 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -84,6 +97,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -91,6 +105,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -98,10 +113,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -184,13 +213,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-https-s2i.json
index 3db0e4c84..78e79c0cf 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver63-https-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server HTTPS applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server (with https)"
},
"name": "decisionserver63-https-s2i"
},
"labels": {
"template": "decisionserver63-https-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,78 +54,91 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -126,6 +146,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +154,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -140,10 +162,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -273,13 +309,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-amq-s2i.json
new file mode 100644
index 000000000..c688a2a67
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-amq-s2i.json
@@ -0,0 +1,748 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server + A-MQ (with https)"
+ },
+ "name": "decisionserver64-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-amq-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
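
For reference, a minimal sketch of instantiating the template added above with the oc client. All names are the template defaults (decisionserver-service-account, decisionserver-app-secret, keystore.jks, APPLICATION_NAME=kie-app), and the keystore file is assumed to already exist locally; exact flags may vary by oc version:

    $ oc create serviceaccount decisionserver-service-account
    $ oc create secret generic decisionserver-app-secret --from-file=keystore.jks
    $ oc secrets link decisionserver-service-account decisionserver-app-secret
    $ oc new-app --template=decisionserver64-amq-s2i -p APPLICATION_NAME=kie-app
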
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-basic-s2i.json
new file mode 100644
index 000000000..778c51844
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-basic-s2i.json
@@ -0,0 +1,376 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server (no https)"
+ },
+ "name": "decisionserver64-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
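
The basic (no-https) variant above needs no service account or keystore secret, so it can be processed directly. A sketch, assuming the file has been saved locally under the name shown in the diff header and using the default parameter values:

    $ oc process -f decisionserver64-basic-s2i.json \
        -p APPLICATION_NAME=kie-app -p SOURCE_REPOSITORY_REF=1.3 | oc create -f -
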
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-https-s2i.json
new file mode 100644
index 000000000..e6c6961c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/decisionserver64-https-s2i.json
@@ -0,0 +1,517 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server (with https)"
+ },
+ "name": "decisionserver64-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
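
The https variant above serves secure content through a passthrough route, so the keystore named by HTTPS_KEYSTORE must already exist inside the secret named by HTTPS_SECRET before the pod starts. A sketch using the template defaults (keystore.jks, alias jboss, password mykeystorepass); the -dname value is a placeholder:

    $ keytool -genkey -keyalg RSA -alias jboss -keystore keystore.jks \
        -storepass mykeystorepass -keypass mykeystorepass \
        -validity 365 -dname "CN=kie-app.example.com"
    $ oc create secret generic decisionserver-app-secret --from-file=keystore.jks
    $ oc create serviceaccount decisionserver-service-account
    $ oc secrets link decisionserver-service-account decisionserver-app-secret
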
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-persistent-s2i.json
index 72dbb4302..912838175 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -5,131 +5,153 @@
"annotations": {
"description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + A-MQ (Persistent with https)"
},
"name": "eap64-amq-persistent-s2i"
},
"labels": {
"template": "eap64-amq-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -137,6 +159,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,18 +167,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +189,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,36 +197,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -207,10 +240,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -234,7 +281,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -258,7 +306,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,11 +409,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -641,7 +700,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
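
The hunks above wire MAVEN_MIRROR_URL and ARTIFACT_DIR into the S2I build environment for this template. A sketch of overriding them at instantiation time; the mirror URL and artifact path are placeholders for illustration only:

    $ oc new-app --template=eap64-amq-persistent-s2i \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/ \
        -p ARTIFACT_DIR=helloworld-mdb/target -p VOLUME_CAPACITY=1Gi
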
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-s2i.json
index 9dd847451..dd4c7a27b 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-amq-s2i.json
@@ -5,119 +5,139 @@
"annotations": {
"description": "Application template for EAP 6 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + A-MQ (Ephemeral with https)"
},
"name": "eap64-amq-s2i"
},
"labels": {
"template": "eap64-amq-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +145,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,18 +153,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -151,6 +175,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -158,36 +183,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,10 +226,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -222,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -246,7 +292,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -348,11 +395,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -626,7 +683,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
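
The new message in this template also points at the JGroups encryption keystore. With the defaults above, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET both resolve to eap-app-secret, so one secret can hold keystore.jks and jgroups.jceks together. A sketch of preparing it; the keytool options and passwords are illustrative:

    $ keytool -genseckey -alias jgroups -keyalg AES -keysize 128 \
        -storetype JCEKS -keystore jgroups.jceks -storepass password -keypass password
    $ oc create secret generic eap-app-secret \
        --from-file=keystore.jks --from-file=jgroups.jceks
    $ oc create serviceaccount eap-service-account
    $ oc secrets link eap-service-account eap-app-secret
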
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-basic-s2i.json
index 7b1800b7b..e13b3851b 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-basic-s2i.json
@@ -6,58 +6,68 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (no https)"
},
"name": "eap64-basic-s2i"
},
"labels": {
"template": "eap64-basic-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application has been created in your project.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -65,6 +75,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -72,6 +83,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -79,12 +91,14 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -92,10 +106,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -174,11 +202,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
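
These example templates are normally loaded into the shared openshift namespace so that every project can instantiate them. A sketch of importing the updated definition and using it from a project, assuming cluster-admin rights and a local copy of the JSON file:

    $ oc replace -f eap64-basic-s2i.json -n openshift
    $ oc new-app --template=eap64-basic-s2i -p APPLICATION_NAME=eap-app
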
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-https-s2i.json
index 31716d84c..0da32eb40 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-https-s2i.json
@@ -6,100 +6,117 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (with https)"
},
"name": "eap64-https-s2i"
},
"labels": {
"template": "eap64-https-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,10 +183,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -287,11 +326,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
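The new template message reminds the user to create the service account and keystore secrets before deploying. A hedged sketch using the template defaults (eap-service-account, and a single eap-app-secret holding both keystore files); the local keystore.jks and jgroups.jceks files are assumed to exist:

    # Create the service account and the secret referenced by HTTPS_SECRET and
    # JGROUPS_ENCRYPT_SECRET (both default to "eap-app-secret").
    oc create serviceaccount eap-service-account
    oc create secret generic eap-app-secret \
        --from-file=keystore.jks=keystore.jks \
        --from-file=jgroups.jceks=jgroups.jceks
    oc secrets link eap-service-account eap-app-secret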
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-persistent-s2i.json
index 212431056..77b75466d 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -5,149 +5,175 @@
"annotations": {
"description": "Application template for EAP 6 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MongoDB (Persistent with https)"
},
"name": "eap64-mongodb-persistent-s2i"
},
"labels": {
"template": "eap64-mongodb-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Database Admin Password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -176,6 +205,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -183,6 +213,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -190,36 +221,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -227,10 +264,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -254,7 +312,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +337,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -380,11 +440,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -674,7 +744,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
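The MongoDB deployment no longer tracks mongodb:latest; it follows the new MONGODB_IMAGE_STREAM_TAG parameter (default 3.2). Before overriding the tag it is worth confirming which tags the target cluster actually provides; a small sketch, assuming the image streams live in the default openshift namespace:

    # List the tags published on the "mongodb" image stream
    # (adjust -n to match IMAGE_STREAM_NAMESPACE if it was changed).
    oc get imagestreamtags -n openshift | grep mongodb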
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-s2i.json
index 13fbbdd93..2785782d4 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mongodb-s2i.json
@@ -5,143 +5,168 @@
"annotations": {
"description": "Application template for EAP 6 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MongoDB (Ephemeral with https)"
},
"name": "eap64-mongodb-s2i"
},
"labels": {
"template": "eap64-mongodb-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Database Admin Password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +198,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,6 +206,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -184,36 +214,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -221,10 +257,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -248,7 +305,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +330,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -374,11 +433,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -668,7 +737,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
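The service.alpha.openshift.io/dependencies annotation added to the http and https services points at the ${APPLICATION_NAME}-mongodb service, which lets the web console group the EAP deployment with its database. A sketch for inspecting the rendered annotations after instantiation, assuming the default APPLICATION_NAME of eap-app:

    # Show the annotations (including service.alpha.openshift.io/dependencies)
    # on the web service created from the template.
    oc get svc eap-app -o jsonpath='{.metadata.annotations}'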
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-persistent-s2i.json
index 69fdec206..cca0f9c2b 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -5,159 +5,187 @@
"annotations": {
"description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MySQL (Persistent with https)"
},
"name": "eap64-mysql-persistent-s2i"
},
"labels": {
"template": "eap64-mysql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +193,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -172,6 +201,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -179,6 +209,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -186,6 +217,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -193,36 +225,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -230,10 +268,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -257,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -281,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -383,11 +444,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -681,7 +752,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
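As with MongoDB, the MySQL deployment now pins its image to MYSQL_IMAGE_STREAM_TAG (default 5.7) instead of mysql:latest. A sketch of instantiating the template by name with the tag set explicitly, assuming the template has already been imported into the cluster:

    # Create the application from the imported template, pinning the MySQL tag.
    oc new-app --template=eap64-mysql-persistent-s2i \
        -p MYSQL_IMAGE_STREAM_TAG=5.7 \
        -p APPLICATION_NAME=eap-app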
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-s2i.json
index 2bd3c249f..5766506fd 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-mysql-s2i.json
@@ -5,153 +5,180 @@
"annotations": {
"description": "Application template for EAP 6 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MySQL (Ephemeral with https)"
},
"name": "eap64-mysql-s2i"
},
"labels": {
"template": "eap64-mysql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +186,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -166,6 +194,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,6 +202,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -180,6 +210,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -187,36 +218,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -377,11 +437,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -675,7 +745,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 31f245950..01891774d 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -5,144 +5,169 @@
"annotations": {
"description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + PostgreSQL (Persistent with https)"
},
"name": "eap64-postgresql-persistent-s2i"
},
"labels": {
"template": "eap64-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -150,6 +175,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -157,6 +183,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +191,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +199,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -178,36 +207,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -215,10 +250,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -242,7 +298,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -266,7 +323,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -368,11 +426,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -666,7 +734,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-s2i.json
index eac964697..e00f2b0e3 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-s2i.json
@@ -5,138 +5,162 @@
"annotations": {
"description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + PostgreSQL (Ephemeral with https)"
},
"name": "eap64-postgresql-s2i"
},
"labels": {
"template": "eap64-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,6 +168,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -151,6 +176,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,6 +184,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +192,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -172,36 +200,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -362,11 +419,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -660,7 +727,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
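
With the parameters above in place, the updated template can be processed directly and the new defaults overridden on the command line. A minimal sketch, assuming an oc session logged in to a project where the xpaas image streams are installed; the Maven mirror URL is a placeholder:

    # Instantiate the updated EAP 6.4 + PostgreSQL template, pointing S2I builds at
    # an internal Maven mirror and pinning the PostgreSQL image stream tag.
    oc process -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-postgresql-s2i.json \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/ \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5 \
        | oc create -f -
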
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-sso-s2i.json
index 09023be71..ec0739d04 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-sso-s2i.json
@@ -3,103 +3,120 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
+ "iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
- "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + Single Sign-On (with https)"
},
"name": "eap64-sso-s2i"
},
"labels": {
"template": "eap64-sso-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTP",
"value": "",
"required": true
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": true
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.x-ose",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,84 +183,98 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
},
{
+ "displayName": "URL for SSO",
"description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
"name": "SSO_URL",
"value": "",
"required": true
},
{
- "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
"name": "SSO_SERVICE_URL",
"value": "https://secure-sso:8443/auth",
"required": false
},
{
+ "displayName": "SSO Realm",
"description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": true
},
{
+ "displayName": "SSO Username",
"description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
"name": "SSO_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Password",
"description": "The password for the SSO service user.",
"name": "SSO_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Public Key",
"description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
"name": "SSO_PUBLIC_KEY",
"value": "",
"required": false
},
{
+ "displayName": "SSO Bearer Only?",
"description": "SSO Client Access Type",
"name": "SSO_BEARER_ONLY",
"value": "",
"required": false
},
{
+ "displayName": "Artifact Directories",
"description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
"name": "ARTIFACT_DIR",
"value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Secret",
"description": "The name of the secret containing the keystore file",
"name": "SSO_SAML_KEYSTORE_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "SSO SAML Keystore",
"description": "The name of the keystore file within the secret",
"name": "SSO_SAML_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "SSO SAML Certificate Name",
"description": "The name associated with the server certificate",
"name": "SSO_SAML_CERTIFICATE_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Password",
"description": "The password for the keystore and certificate",
"name": "SSO_SAML_KEYSTORE_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "SSO Client Secret",
"description": "The SSO Client Secret for Confidential Access",
"name": "SSO_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -243,42 +282,55 @@
"required": true
},
{
+ "displayName": "Enable CORS for SSO?",
"description": "Enable CORS for SSO applications",
"name": "SSO_ENABLE_CORS",
"value": "false",
"required": false
},
{
+ "displayName": "SSO SAML Logout Page",
"description": "SSO logout page for SAML applications",
"name": "SSO_SAML_LOGOUT_PAGE",
"value": "/",
"required": false
},
{
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
"description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
"name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
"value": "true",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "eap-app-secret",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
}
- ],
+ ],
"objects": [
{
"kind": "Service",
@@ -406,7 +458,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
},
"env": [
{
@@ -416,6 +468,10 @@
{
"name": "MAVEN_ARGS_APPEND",
"value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
}
]
}
@@ -615,7 +671,7 @@
"name": "HORNETQ_TOPICS",
"value": "${HORNETQ_TOPICS}"
},
- {
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
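
The message added to this template asks for the eap-service-account service account plus the HTTPS and JGroups keystore secrets to exist before deployment. A minimal sketch of those prerequisites using the template's default names, assuming self-signed keys are acceptable; the -dname value and the passwords are placeholders:

    # Generate the HTTPS keystore and the JCEKS keystore used for JGroups encryption.
    keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks \
        -storepass mykeystorepass -keypass mykeystorepass -dname "CN=eap-app.example.com"
    keytool -genseckey -alias secret-key -keyalg AES -keysize 128 -storetype JCEKS \
        -keystore jgroups.jceks -storepass password -keypass password
    # Create the service account, store both keystores in one secret (the default
    # value of HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET), and let the account mount it.
    oc create serviceaccount eap-service-account
    oc create secret generic eap-app-secret --from-file=keystore.jks --from-file=jgroups.jceks
    oc secrets link eap-service-account eap-app-secret
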
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-third-party-db-s2i.json
index 07f926ff3..e8f6d6585 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-third-party-db-s2i.json
@@ -4,212 +4,231 @@
"metadata": {
"annotations": {
"iconClass": "icon-jboss",
- "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
- "tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "description": "Application template for EAP 6 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (with https, supporting third-party DB drivers)"
},
- "name": "datavirt63-secure-s2i"
+ "name": "eap64-third-party-db-s2i"
},
"labels": {
- "template": "datavirt63-secure-s2i",
+ "template": "eap64-third-party-db-s2i",
"xpaas": "1.4.0"
},
- "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "message": "A new EAP 6 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets:\"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
- "description": "The name for the application.",
"displayName": "Application Name",
+ "description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "datavirt-app",
+ "value": "eap-app",
"required": true
},
{
- "description": "The name of the secret containing configuration properties for the data sources.",
"displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
"name": "CONFIGURATION_NAME",
- "value": "datavirt-app-config",
+ "value": "eap-app-config",
"required": true
},
{
- "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
"displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
- "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
"displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
- "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
- "displayName": "Custom JDBC Route Hostname",
- "name": "HOSTNAME_JDBC",
- "value": "",
- "required": false
- },
- {
- "description": "The URL of the repository with your application source code.",
"displayName": "Git Repository URL",
+ "description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
- "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
"displayName": "Git Reference",
+ "description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "master",
"required": false
},
{
- "description": "Set this to the relative path to your project if it is not in the root of your repository.",
"displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
- "value": "datavirt/dynamicvdb-datafederation/app",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within HornetQ subsystem.",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within HornetQ subsystem.",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
"required": false
},
{
- "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
- "value": "datavirt-service-account",
+ "value": "eap-service-account",
"required": true
},
{
- "description": "The name of the secret containing the keystore to be used for serving secure content.",
"displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
- "value": "datavirt-app-secret",
+ "value": "eap-app-secret",
"required": true
},
{
- "description": "The name of the keystore file within the secret.",
"displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
- "description": "The type of the keystore file (JKS or JCEKS).",
"displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
- "description": "The name associated with the server certificate.",
"displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
- "description": "The password for the keystore and certificate",
"displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
- "description": "Username associated with Teiid data service.",
- "displayName": "Teiid Username",
- "name": "TEIID_USERNAME",
- "from": "[\\a]{8}",
- "generate": "expression",
- "required": true
- },
- {
- "description": "Password for Teiid user.",
- "displayName": "Teiid User Password",
- "name": "TEIID_PASSWORD",
- "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
- "generate": "expression",
- "required": true
- },
- {
- "description": "Username associated with ModeShape.",
- "displayName": "ModeShape Username",
- "name": "MODESHAPE_USERNAME",
- "from": "[\\a]{8}",
- "generate": "expression",
- "required": true
- },
- {
- "description": "Password for ModeShape user.",
- "displayName": "ModeShape User Password",
- "name": "MODESHAPE_PASSWORD",
- "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "displayName": "HornetQ Cluster Admin Password",
+ "description": "Admin password for HornetQ cluster.",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
},
{
- "description": "A secret string used to configure the GitHub webhook.",
"displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
},
{
- "description": "A secret string used to configure the Generic webhook.",
"displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
},
{
- "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
- "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
"displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
"name": "JGROUPS_ENCRYPT_SECRET",
- "value": "datavirt-app-secret",
+ "value": "eap-app-secret",
"required": false
},
{
- "description": "The name of the keystore file within the JGroups secret.",
"displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
- "description": "The name associated with the JGroups server certificate",
"displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "secret-key",
"required": false
},
{
- "description": "The password for the keystore and certificate",
"displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "password",
"required": false
},
{
- "description": "Password used by JGroups to authenticate nodes in the cluster.",
"displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
},
{
- "description": "Controls whether exploded deployment content should be automatically deployed",
"displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -219,24 +238,8 @@
"spec": {
"ports": [
{
- "name": "http",
"port": 8080,
- "targetPort": "http"
- },
- {
- "name": "https",
- "port": 8443,
- "targetPort": "https"
- },
- {
- "name": "jdbc",
- "port": 31000,
- "targetPort": "jdbc"
- },
- {
- "name": "jdbcs",
- "port": 31443,
- "targetPort": "jdbcs"
+ "targetPort": 8080
}
],
"selector": {
@@ -249,79 +252,71 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The data virtualization services."
+ "description": "The web server's http port."
}
}
},
{
- "kind": "Route",
+ "kind": "Service",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
"metadata": {
- "name": "${APPLICATION_NAME}",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Route for application's http (REST) service."
- }
- },
- "spec": {
- "host": "${HOSTNAME_HTTP}",
- "port": {
- "targetPort": "http"
- },
- "to": {
- "name": "${APPLICATION_NAME}"
+ "description": "The web server's https port."
}
}
},
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "secure-${APPLICATION_NAME}",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Route for application's https (REST) service."
+ "description": "Route for application's http service."
}
},
"spec": {
- "host": "${HOSTNAME_HTTPS}",
- "port": {
- "targetPort": "https"
- },
+ "host": "${HOSTNAME_HTTP}",
"to": {
"name": "${APPLICATION_NAME}"
- },
- "tls": {
- "termination": "passthrough"
}
}
},
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-jdbc",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "jdbc-${APPLICATION_NAME}",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Route for application's JDBC service."
+ "description": "Route for application's https service."
}
},
"spec": {
- "host": "${HOSTNAME_JDBC}",
- "port": {
- "targetPort": "jdbcs"
- },
+ "host": "${HOSTNAME_HTTPS}",
"to": {
- "name": "${APPLICATION_NAME}"
+ "name": "secure-${APPLICATION_NAME}"
},
"tls": {
"termination": "passthrough"
@@ -354,16 +349,45 @@
"uri": "${SOURCE_REPOSITORY_URL}",
"ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir": "${CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -391,6 +415,16 @@
"imageChange": {}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
@@ -450,16 +484,16 @@
"volumeMounts": [
{
"name": "configuration",
- "mountPath": "/etc/datavirt-environment",
+ "mountPath": "/etc/eap-environment",
"readOnly": true
},
{
- "name": "datavirt-keystore-volume",
- "mountPath": "/etc/datavirt-secret-volume",
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
"readOnly": true
},
{
- "name": "datavirt-jgroups-keystore-volume",
+ "name": "eap-jgroups-keystore-volume",
"mountPath": "/etc/jgroups-encrypt-secret-volume",
"readOnly": true
}
@@ -499,16 +533,6 @@
"protocol": "TCP"
},
{
- "name": "jdbc",
- "containerPort": 31000,
- "protocol": "TCP"
- },
- {
- "name": "jdbcs",
- "containerPort": 31443,
- "protocol": "TCP"
- },
- {
"name": "ping",
"containerPort": 8888,
"protocol": "TCP"
@@ -528,8 +552,12 @@
}
},
{
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
"name": "HTTPS_KEYSTORE_DIR",
- "value": "/etc/datavirt-secret-volume"
+ "value": "/etc/eap-secret-volume"
},
{
"name": "HTTPS_KEYSTORE",
@@ -548,6 +576,18 @@
"value": "${HTTPS_PASSWORD}"
},
{
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
@@ -574,42 +614,6 @@
{
"name": "AUTO_DEPLOY_EXPLODED",
"value": "${AUTO_DEPLOY_EXPLODED}"
- },
- {
- "name": "TEIID_USERNAME",
- "value": "${TEIID_USERNAME}"
- },
- {
- "name": "TEIID_PASSWORD",
- "value": "${TEIID_PASSWORD}"
- },
- {
- "name": "MODESHAPE_USERNAME",
- "value": "${MODESHAPE_USERNAME}"
- },
- {
- "name": "MODESHAPE_PASSWORD",
- "value": "${MODESHAPE_PASSWORD}"
- },
- {
- "name": "ENV_FILES",
- "value": "/etc/datavirt-environment/*"
- },
- {
- "name": "DATAVIRT_TRANSPORT_KEYSTORE",
- "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
- },
- {
- "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
- "value": "${HTTPS_KEYSTORE_TYPE}"
- },
- {
- "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
- "value": "${HTTPS_NAME}"
- },
- {
- "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
- "value": "${HTTPS_PASSWORD}"
}
]
}
@@ -622,13 +626,13 @@
}
},
{
- "name": "datavirt-keystore-volume",
+ "name": "eap-keystore-volume",
"secret": {
"secretName": "${HTTPS_SECRET}"
}
},
{
- "name": "datavirt-jgroups-keystore-volume",
+ "name": "eap-jgroups-keystore-volume",
"secret": {
"secretName": "${JGROUPS_ENCRYPT_SECRET}"
}
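
Relative to the datavirt63-secure-s2i template that git paired it with, the new eap64-third-party-db-s2i template pulls its JDBC drivers in as a source image: the build copies ${EXTENSIONS_INSTALL_DIR}/. from the drivers image into extensions/extras under the context directory, and CUSTOM_INSTALL_DIRECTORIES=extensions/* is intended to make the EAP S2I image run the install script shipped there (the install.sh mentioned in the EXTENSIONS_INSTALL_DIR description). A minimal sketch of instantiating it, assuming the drivers image stream already exists and using a placeholder file name for the datasource configuration:

    # Create the configuration secret named by CONFIGURATION_NAME, then process the
    # template with the drivers image stream tag from the parameter defaults.
    oc create secret generic eap-app-config --from-file=datasources.env
    oc process -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap64-third-party-db-s2i.json \
        -p EXTENSIONS_IMAGE=jboss-datavirt63-driver-openshift:1.0 \
        -p EXTENSIONS_IMAGE_NAMESPACE=openshift \
        | oc create -f -
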
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-persistent-s2i.json
index f08cdf2f9..3f0eba6e3 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -5,131 +5,153 @@
"annotations": {
"description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + A-MQ (Persistent with https)"
},
"name": "eap70-amq-persistent-s2i"
},
"labels": {
"template": "eap70-amq-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -137,6 +159,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,18 +167,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +189,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,36 +197,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -207,10 +240,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -234,7 +281,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -258,7 +306,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,11 +409,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -641,7 +700,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
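
When AMQ_MESH_DISCOVERY_TYPE is left at its default of kube, the parameter description above notes that the pod's service account needs the view role so the broker can resolve mesh endpoints through the Kubernetes REST API. A sketch of the command quoted in that description, with myproject standing in for the actual project namespace:

    # Grant the project's default service account read access to API objects,
    # as required for 'kube' based A-MQ mesh discovery.
    oc policy add-role-to-user view system:serviceaccount:myproject:default
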
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-s2i.json
index 3ca9e9fab..f2d65f353 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-amq-s2i.json
@@ -5,119 +5,139 @@
"annotations": {
"description": "Application template for EAP 7 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + A-MQ (with https)"
},
"name": "eap70-amq-s2i"
},
"labels": {
"template": "eap70-amq-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +145,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,18 +153,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -151,6 +175,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -158,36 +183,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,10 +226,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -222,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -246,7 +292,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -348,11 +395,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -626,7 +683,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
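For reference, the new "env" entries under "sourceStrategy" above are how the MAVEN_MIRROR_URL and ARTIFACT_DIR parameters reach the S2I build. When the template is instantiated (for example with oc process) and a mirror is supplied, the rendered BuildConfig strategy would look roughly like the sketch below; the nexus URL is only a placeholder value, not something defined by the template:

    "strategy": {
        "type": "Source",
        "sourceStrategy": {
            "env": [
                {
                    "name": "MAVEN_MIRROR_URL",
                    "value": "http://nexus.example.com/repository/maven-public/"
                },
                {
                    "name": "ARTIFACT_DIR",
                    "value": ""
                }
            ],
            "forcePull": true,
            "from": {
                "kind": "ImageStreamTag",
                "namespace": "openshift",
                "name": "jboss-eap70-openshift:1.5"
            }
        }
    }

Leaving ARTIFACT_DIR empty keeps the documented default of copying every archive found under /target.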
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-basic-s2i.json
index 83b4d5b24..c33e3f7cb 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-basic-s2i.json
@@ -6,58 +6,68 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (no https)"
},
"name": "eap70-basic-s2i"
},
"labels": {
"template": "eap70-basic-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application has been created in your project.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.0.GA",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -65,6 +75,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -72,6 +83,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -79,12 +91,14 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -92,10 +106,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -174,11 +202,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-https-s2i.json
index 1292442a4..7542d31c8 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-https-s2i.json
@@ -6,100 +6,117 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (with https)"
},
"name": "eap70-https-s2i"
},
"labels": {
"template": "eap70-https-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.0.GA",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,10 +183,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -287,11 +326,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-persistent-s2i.json
index 99db77d58..8a7da66c1 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -5,149 +5,175 @@
"annotations": {
"description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MongoDB (Persistent with https)"
},
"name": "eap70-mongodb-persistent-s2i"
},
"labels": {
"template": "eap70-mongodb-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -176,6 +205,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -183,6 +213,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -190,36 +221,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -227,10 +264,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -254,7 +312,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +337,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -380,11 +440,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -685,7 +755,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
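The service.alpha.openshift.io/dependencies annotations added above are read by the OpenShift web console, which can use them to group the EAP service with the MongoDB service it depends on in the project overview. Assuming the default APPLICATION_NAME of eap-app, the rendered annotation block on the HTTP service would be roughly:

    "annotations": {
        "description": "The web server's http port.",
        "service.alpha.openshift.io/dependencies": "[{\"name\": \"eap-app-mongodb\", \"kind\": \"Service\"}]"
    }

The A-MQ templates earlier in this diff use the same pattern, pointing at ${APPLICATION_NAME}-amq-tcp instead.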
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-s2i.json
index c8150c231..ae52a3deb 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mongodb-s2i.json
@@ -5,143 +5,168 @@
"annotations": {
"description": "Application template for EAP 7 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MongoDB (Ephemeral with https)"
},
"name": "eap70-mongodb-s2i"
},
"labels": {
"template": "eap70-mongodb-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +198,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,6 +206,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -184,36 +214,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -221,10 +257,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -248,7 +305,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +330,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -374,11 +433,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -679,7 +748,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-persistent-s2i.json
index f8e5c2b04..a0a3d7717 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -5,159 +5,187 @@
"annotations": {
"description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MySQL (Persistent with https)"
},
"name": "eap70-mysql-persistent-s2i"
},
"labels": {
"template": "eap70-mysql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +193,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -172,6 +201,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -179,6 +209,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -186,6 +217,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -193,36 +225,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -230,10 +268,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -257,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -281,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -383,11 +444,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -696,7 +767,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
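Replacing mysql:latest with mysql:${MYSQL_IMAGE_STREAM_TAG} (default 5.7) pins the database deployment to a known major.minor release rather than whatever the latest tag happens to point at. With the default parameter values, the image change trigger would resolve to roughly:

    "from": {
        "kind": "ImageStreamTag",
        "namespace": "openshift",
        "name": "mysql:5.7"
    }

A different tag can be chosen at instantiation time, for example by passing -p MYSQL_IMAGE_STREAM_TAG=5.6 to oc process, assuming that tag exists in the mysql ImageStream.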
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-s2i.json
index 1edeb62e7..8255ade5d 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-mysql-s2i.json
@@ -5,153 +5,180 @@
"annotations": {
"description": "Application template for EAP 7 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MySQL (Ephemeral with https)"
},
"name": "eap70-mysql-s2i"
},
"labels": {
"template": "eap70-mysql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +186,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -166,6 +194,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,6 +202,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -180,6 +210,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -187,36 +218,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -377,11 +437,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -690,7 +760,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-persistent-s2i.json
index d11df06ee..436c541d8 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -5,144 +5,169 @@
"annotations": {
"description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + PostgreSQL (Persistent with https)"
},
"name": "eap70-postgresql-persistent-s2i"
},
"labels": {
"template": "eap70-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -150,6 +175,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -157,6 +183,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +191,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +199,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -178,36 +207,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -215,10 +250,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -242,7 +298,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -266,7 +323,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -368,11 +426,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -681,7 +749,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-s2i.json
index 6b7f6d707..a2a37a886 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-postgresql-s2i.json
@@ -5,138 +5,162 @@
"annotations": {
"description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + PostgreSQL (Ephemeral with https)"
},
"name": "eap70-postgresql-s2i"
},
"labels": {
"template": "eap70-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,6 +168,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -151,6 +176,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,6 +184,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +192,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -172,36 +200,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -362,11 +419,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -675,7 +742,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-sso-s2i.json
index 811602220..08a844cd9 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-sso-s2i.json
@@ -3,103 +3,120 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
+ "iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
- "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + Single Sign-On (with https)"
},
"name": "eap70-sso-s2i"
},
"labels": {
"template": "eap70-sso-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTP",
"value": "",
"required": true
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": true
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.x-ose",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,84 +183,98 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
},
{
+ "displayName": "URL for SSO",
"description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
"name": "SSO_URL",
"value": "",
"required": true
},
{
- "description": "The URL for the interal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
"name": "SSO_SERVICE_URL",
"value": "https://secure-sso:8443/auth",
"required": false
},
{
+ "displayName": "SSO Realm",
"description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": true
},
{
+ "displayName": "SSO Username",
"description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
"name": "SSO_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Password",
"description": "The password for the SSO service user.",
"name": "SSO_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Public Key",
"description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability",
"name": "SSO_PUBLIC_KEY",
"value": "",
"required": false
},
{
+ "displayName": "SSO Bearer Only?",
"description": "SSO Client Access Type",
"name": "SSO_BEARER_ONLY",
"value": "",
"required": false
},
{
+ "displayName": "Artifact Directories",
"description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
"name": "ARTIFACT_DIR",
"value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Secret",
"description": "The name of the secret containing the keystore file",
"name": "SSO_SAML_KEYSTORE_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "SSO SAML Keystore",
"description": "The name of the keystore file within the secret",
"name": "SSO_SAML_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "SSO SAML Certificate Name",
"description": "The name associated with the server certificate",
"name": "SSO_SAML_CERTIFICATE_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Password",
"description": "The password for the keystore and certificate",
"name": "SSO_SAML_KEYSTORE_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "SSO Client Secret",
"description": "The SSO Client Secret for Confidential Access",
"name": "SSO_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -243,42 +282,55 @@
"required": true
},
{
+ "displayName": "Enable CORS for SSO?",
"description": "Enable CORS for SSO applications",
"name": "SSO_ENABLE_CORS",
"value": "false",
"required": false
},
{
+ "displayName": "SSO SAML Logout Page",
"description": "SSO logout page for SAML applications",
"name": "SSO_SAML_LOGOUT_PAGE",
"value": "/",
"required": false
},
{
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
"description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
"name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
"value": "true",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "eap7-app-secret",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
}
- ],
+ ],
"objects": [
{
"kind": "Service",
@@ -406,7 +458,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
},
"env": [
{
@@ -416,6 +468,10 @@
{
"name": "MAVEN_ARGS_APPEND",
"value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
}
]
}
@@ -593,7 +649,7 @@
{
"name": "HOSTNAME_HTTPS",
"value": "${HOSTNAME_HTTPS}"
- },
+ },
{
"name": "HTTPS_KEYSTORE_DIR",
"value": "/etc/eap-secret-volume"
@@ -626,7 +682,7 @@
"name": "HORNETQ_TOPICS",
"value": "${HORNETQ_TOPICS}"
},
- {
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
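
The message added to the SSO-enabled template above asks for a service account and keystore secrets to exist before the template is instantiated. A hedged sketch of those prerequisites under the template's default parameter values; the keystore files are assumed to have been generated separately (for example with keytool), and only the names come from the diff.

    # Sketch only: create the service account and a single secret holding both keystores,
    # matching the defaults SERVICE_ACCOUNT_NAME=eap7-service-account and
    # HTTPS_SECRET=JGROUPS_ENCRYPT_SECRET=eap7-app-secret.
    oc create serviceaccount eap7-service-account
    oc create secret generic eap7-app-secret \
      --from-file=keystore.jks \
      --from-file=jgroups.jceks
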
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-third-party-db-s2i.json
new file mode 100644
index 000000000..9e854d7ab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-third-party-db-s2i.json
@@ -0,0 +1,657 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (with https, supporting third-party DB drivers)"
+ },
+ "name": "eap70-third-party-db-s2i"
+ },
+ "labels": {
+ "template": "eap70-third-party-db-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets:\"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
+ "name": "CONFIGURATION_NAME",
+ "value": "eap-app-config",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within Messaging subsystem.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within Messaging subsystem.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+      "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Messaging Cluster Admin Password",
+ "description": "Admin password for Messaging cluster.",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/eap-environment",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
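
The new template mounts the secret named by CONFIGURATION_NAME at /etc/eap-environment and sets ENV_FILES to /etc/eap-environment/*, so that secret is expected to carry environment files with the datasource settings the image consumes. A hedged sketch of wiring it up: the env file name and its contents are placeholders, and the variable names it would contain are defined by the EAP S2I and drivers images rather than by this template; the other values are the template defaults shown above.

    # Sketch only: supply datasource configuration through the CONFIGURATION_NAME secret,
    # then instantiate the new template with its default drivers image.
    oc create secret generic eap-app-config --from-file=datasource.env
    oc process -f roles/openshift_examples/files/examples/v1.4/xpaas-templates/eap70-third-party-db-s2i.json \
      -p CONFIGURATION_NAME=eap-app-config \
      -p EXTENSIONS_IMAGE=jboss-datavirt63-driver-openshift:1.0 \
      | oc create -f -
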
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 413a6de87..4e42e0eca 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -6,46 +6,54 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat7,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 (no https)"
},
"name": "jws30-tomcat7-basic-s2i"
},
"labels": {
"template": "jws30-tomcat7-basic-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -53,6 +61,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,6 +69,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -67,6 +77,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -74,10 +85,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -156,11 +181,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -248,7 +283,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-https-s2i.json
index 610ea9441..f5fc2e581 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -6,76 +6,89 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat7,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 (with https)"
},
"name": "jws30-tomcat7-https-s2i"
},
"labels": {
"template": "jws30-tomcat7-https-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -83,6 +96,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -233,11 +263,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -326,7 +366,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 6ef9d6e4c..2a73a182c 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -5,125 +5,147 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MongoDB (Persistent with https)"
},
"name": "jws30-tomcat7-mongodb-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-mongodb-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -131,6 +153,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -138,6 +161,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,6 +169,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -152,6 +177,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +185,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -166,6 +193,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -173,10 +201,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -200,7 +249,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -224,7 +274,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -326,11 +377,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -419,7 +480,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -547,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
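Besides the same MAVEN_MIRROR_URL/ARTIFACT_DIR additions, this template now pins the database to mongodb:${MONGODB_IMAGE_STREAM_TAG} (default 3.2) instead of mongodb:latest, so a later retag of "latest" in the openshift namespace no longer triggers an unplanned redeployment of the database pod. A sketch of overriding the pin when instantiating the template (parameter values here are illustrative, not taken from this diff):

# Pin the MongoDB image stream tag explicitly rather than relying on "latest".
oc process -f jws30-tomcat7-mongodb-persistent-s2i.json \
    -p MONGODB_IMAGE_STREAM_TAG=3.2 \
    -p DB_DATABASE=todolist \
    | oc create -f -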
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index 9b48f8ae7..a71dfa634 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -5,119 +5,140 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications built using S2I.",
- "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MongoDB (Ephemeral with https)"
},
"name": "jws30-tomcat7-mongodb-s2i"
},
"labels": {
"template": "jws30-tomcat7-mongodb-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +146,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,6 +154,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -139,6 +162,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -146,6 +170,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -153,6 +178,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -160,6 +186,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -167,10 +194,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -194,7 +242,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -218,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -320,11 +370,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -413,7 +473,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -541,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
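The ephemeral MongoDB variant receives the same changes, including the new service.alpha.openshift.io/dependencies annotation on the http and https services, which lets the web console group the web server with its database on the project overview. Assuming the default APPLICATION_NAME of jws-app, the annotation added here expands to the value below; applying it by hand has the same effect as the template change:

# Equivalent manual annotation for an already-created service.
oc annotate service jws-app \
    'service.alpha.openshift.io/dependencies=[{"name": "jws-app-mongodb", "kind": "Service"}]' \
    --overwrite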
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index 30af703ce..9a05dcbd5 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -5,135 +5,159 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MySQL (Persistent with https)"
},
"name": "jws30-tomcat7-mysql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-mysql-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -141,6 +165,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +173,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -176,10 +205,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -203,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -227,7 +278,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -329,11 +381,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -422,7 +484,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -546,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
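As in the other templates, the MySQL persistent variant switches its Tomcat manager health check to curl --noproxy '*'. Without it, HTTP_PROXY/http_proxy variables injected into the pod can send the localhost request through the proxy and make the readiness and liveness probes fail even though Tomcat is up. A sketch of reproducing the check by hand inside a running pod (the pod name and credentials are placeholders, not values from this diff):

# Run the same manager check the probe uses, from inside the pod.
oc exec jws-app-1-abcde -- /bin/bash -c \
    "curl --noproxy '*' -s -u tomcatUser:tomcatPass \
     'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName'"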
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index c2843af63..553a30a44 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -5,129 +5,152 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications built using S2I.",
- "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MySQL (Ephemeral with https)"
},
"name": "jws30-tomcat7-mysql-s2i"
},
"labels": {
"template": "jws30-tomcat7-mysql-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -135,6 +158,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -142,6 +166,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,10 +198,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -197,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -221,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -323,11 +374,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -416,7 +477,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -540,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
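The template message added above reminds the user to create the jws-service-account service account and the ${JWS_HTTPS_SECRET} secret holding the certificate and key referenced by JWS_HTTPS_CERTIFICATE and JWS_HTTPS_CERTIFICATE_KEY. A minimal sketch of those prerequisites, using the template's default parameter values and a self-signed certificate purely for illustration:

# Self-signed cert/key pair for illustration only.
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
    -subj '/CN=jws-app' -keyout server.key -out server.crt
# Service account and secret names match the template defaults.
oc create serviceaccount jws-service-account
oc create secret generic jws-app-secret \
    --from-file=server.crt --from-file=server.key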
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index b8372f374..a5c6c8a56 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -5,120 +5,141 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + PostgreSQL (Persistent with https)"
},
"name": "jws30-tomcat7-postgresql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -126,6 +147,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +155,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +163,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -147,6 +171,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -154,6 +179,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -161,10 +187,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -188,7 +235,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -212,7 +260,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -314,11 +363,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -407,7 +466,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -531,7 +590,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
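The PostgreSQL persistent template follows the same pattern, pinning the database to postgresql:${POSTGRESQL_IMAGE_STREAM_TAG} (default 9.5) instead of postgresql:latest. Before instantiating, it can be worth confirming that the pinned tag actually exists in the ImageStream namespace; the namespace and tag below match the template defaults:

# Verify the pinned PostgreSQL tag is present in the openshift namespace.
oc get imagestreamtag postgresql:9.5 -n openshift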
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index cd5bb9fa4..61a3208e4 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -5,114 +5,134 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications built using S2I.",
- "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + PostgreSQL (Ephemeral with https)"
},
"name": "jws30-tomcat7-postgresql-s2i"
},
"labels": {
"template": "jws30-tomcat7-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -120,6 +140,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -127,6 +148,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -134,6 +156,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -141,6 +164,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +172,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -155,10 +180,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -182,7 +228,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -206,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -308,11 +356,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -401,7 +459,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -525,7 +583,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
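GITHUB_WEBHOOK_SECRET and GENERIC_WEBHOOK_SECRET are generated parameters (their from patterns yield eight-character alphanumeric values at instantiation time) that end up embedded in the BuildConfig's trigger URLs. Once the template has been instantiated, those URLs can be read back with oc; the BuildConfig name below assumes the default APPLICATION_NAME, and the grep is only a rough filter for the describe output:

# Show the GitHub/Generic webhook trigger URLs containing the generated secrets.
oc describe buildconfig jws-app | grep -i -A1 webhook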
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-basic-s2i.json
index cb1e49d29..75d08e99d 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -6,46 +6,54 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat8,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 (no https)"
},
"name": "jws30-tomcat8-basic-s2i"
},
"labels": {
"template": "jws30-tomcat8-basic-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -53,6 +61,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,6 +69,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -67,6 +77,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -74,10 +85,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -156,11 +181,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -248,7 +283,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-https-s2i.json
index 21d5662c7..71577bec4 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -6,76 +6,89 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat8,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 (with https)"
},
"name": "jws30-tomcat8-https-s2i"
},
"labels": {
"template": "jws30-tomcat8-https-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -83,6 +96,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -233,11 +263,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -326,7 +366,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 34657d826..de86dd83e 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -5,125 +5,147 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MongoDB (Persistent with https)"
},
"name": "jws30-tomcat8-mongodb-persistent-s2i"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"labels": {
"template": "jws30-tomcat8-mongodb-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -131,6 +153,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -138,6 +161,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,6 +169,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -152,6 +177,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +185,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -166,6 +193,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -173,10 +201,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -200,7 +249,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -224,7 +274,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -326,11 +377,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -419,7 +480,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -547,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index 974cfaddb..6dc85e226 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -5,119 +5,140 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications built using S2I.",
- "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MongoDB (Ephemeral with https)"
},
"name": "jws30-tomcat8-mongodb-s2i"
},
"labels": {
"template": "jws30-tomcat8-mongodb-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +146,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,6 +154,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -139,6 +162,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -146,6 +170,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -153,6 +178,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -160,6 +186,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -167,10 +194,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -194,7 +242,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -218,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -320,11 +370,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -413,7 +473,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -541,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index 7a8231cc5..0e96b58a9 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -5,135 +5,159 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MySQL (Persistent with https)"
},
"name": "jws30-tomcat8-mysql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat8-mysql-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -141,6 +165,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +173,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -176,10 +205,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -203,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -227,7 +278,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -329,11 +381,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -422,7 +484,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -546,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index cda21f237..08b040863 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -5,129 +5,152 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications built using S2I.",
- "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MySQL (Ephemeral with https)"
},
"name": "jws30-tomcat8-mysql-s2i"
},
"labels": {
"template": "jws30-tomcat8-mysql-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -135,6 +158,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -142,6 +166,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,10 +198,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -197,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -221,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -323,11 +374,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -416,7 +477,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -540,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index 4dfc98015..f117e6624 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -5,120 +5,141 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + PostgreSQL (Persistent with https)"
},
"name": "jws30-tomcat8-postgresql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat8-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -126,6 +147,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +155,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +163,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -147,6 +171,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -154,6 +179,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -161,10 +187,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -188,7 +235,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -212,7 +260,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -314,11 +363,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -407,7 +466,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -531,7 +590,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index f6c85668c..faece1269 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -5,114 +5,134 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications built using S2I.",
- "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + (PostgreSQL with https)"
},
"name": "jws30-tomcat8-postgresql-s2i"
},
"labels": {
"template": "jws30-tomcat8-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -120,6 +140,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -127,6 +148,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -134,6 +156,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -141,6 +164,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +172,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -155,10 +180,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -182,7 +228,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -206,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -308,11 +356,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -400,7 +458,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -524,7 +582,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
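
The hunks above wire the template's web services to the database service through the service.alpha.openshift.io/dependencies annotation, move the builder image from jboss-webserver30-tomcat8-openshift:1.2 to 1.3, pass MAVEN_MIRROR_URL and ARTIFACT_DIR into the S2I build, add --noproxy '*' so the readiness probe's curl call to localhost bypasses any configured HTTP proxy, and parameterize the PostgreSQL image stream tag. The dependency annotation carries a JSON-encoded array; as a minimal sketch, with the default APPLICATION_NAME of jws-app it decodes to:

[
  {
    "name": "jws-app-postgresql",
    "kind": "Service"
  }
]

The web console typically uses this annotation to group the database service with the web service in the project overview.
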
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-basic-s2i.json
new file mode 100644
index 000000000..6db6e8cc6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-basic-s2i.json
@@ -0,0 +1,319 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 (no https)"
+ },
+ "name": "jws31-tomcat7-basic-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
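
Like the diff above, this new jws31-tomcat7-basic-s2i template forwards MAVEN_MIRROR_URL and ARTIFACT_DIR from template parameters into the S2I build environment. As a rough sketch (the mirror URL below is a placeholder, not a value shipped with the template), processing the template with MAVEN_MIRROR_URL set and ARTIFACT_DIR left empty yields a build strategy fragment along these lines:

"strategy": {
  "type": "Source",
  "sourceStrategy": {
    "env": [
      {
        "name": "MAVEN_MIRROR_URL",
        "value": "http://nexus.example.com/repository/maven-public/"
      },
      {
        "name": "ARTIFACT_DIR",
        "value": ""
      }
    ],
    "forcePull": true,
    "from": {
      "kind": "ImageStreamTag",
      "namespace": "openshift",
      "name": "jboss-webserver31-tomcat7-openshift:1.0"
    }
  }
}

With ARTIFACT_DIR left blank, all archives found under /target are copied into the deployment folder, as noted in the parameter description.
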
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-https-s2i.json
new file mode 100644
index 000000000..fd5fca316
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-https-s2i.json
@@ -0,0 +1,438 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 (with https)"
+ },
+ "name": "jws31-tomcat7-https-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
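
The template's message field reminds the user to create the "jws-service-account" service account and the certificate secret before deploying. Below is a minimal sketch of those two objects, assuming the default JWS_HTTPS_SECRET name (jws-app-secret) and the default certificate and key file names from the template parameters; the data values are placeholders that must be replaced with a real base64-encoded certificate and key:

{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "ServiceAccount",
      "apiVersion": "v1",
      "metadata": {
        "name": "jws-service-account"
      }
    },
    {
      "kind": "Secret",
      "apiVersion": "v1",
      "metadata": {
        "name": "jws-app-secret"
      },
      "type": "Opaque",
      "data": {
        "server.crt": "<base64-encoded certificate>",
        "server.key": "<base64-encoded private key>"
      }
    }
  ]
}

The secret is mounted read-only at /etc/jws-secret-volume, and the JWS_HTTPS_CERTIFICATE* environment variables tell the web server which files inside that directory to use.
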
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..6bbea8ab8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json
@@ -0,0 +1,715 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MongoDB (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
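
The persistent variant adds a PersistentVolumeClaim and mounts it at /var/lib/mongodb/data in the MongoDB container. With the template's default parameter values (APPLICATION_NAME=jws-app, VOLUME_CAPACITY=512Mi), the claim it creates resolves to:

{
  "apiVersion": "v1",
  "kind": "PersistentVolumeClaim",
  "metadata": {
    "name": "jws-app-mongodb-claim",
    "labels": {
      "application": "jws-app"
    }
  },
  "spec": {
    "accessModes": [
      "ReadWriteOnce"
    ],
    "resources": {
      "requests": {
        "storage": "512Mi"
      }
    }
  }
}

A persistent volume that can satisfy this claim (or a dynamic provisioner) must be available in the cluster; otherwise the MongoDB pod will remain pending while it waits for the volume.
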
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-s2i.json
new file mode 100644
index 000000000..a565ee4c0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mongodb-s2i.json
@@ -0,0 +1,674 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MongoDB (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
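
This jws31-tomcat7-mongodb-s2i template is the ephemeral counterpart of the persistent one above: the MongoDB container has no volume, so database contents do not survive deletion of the pod. In both variants, the credential and webhook parameters declare "generate": "expression", which means the template processor fills them with a random string matching the given pattern whenever no explicit value is supplied. For a pattern of [a-zA-Z0-9]{8}, a processed environment entry comes out roughly like this (the value shown is illustrative only, not a real generated secret):

{
  "name": "DB_PASSWORD",
  "value": "a1B2c3D4"
}

The generated credentials are referenced in the template's message field, so they can be recorded when the template is instantiated.
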
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json
new file mode 100644
index 000000000..be6899958
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json
@@ -0,0 +1,718 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MySQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
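
The persistent MySQL template above notes in its message that the "jws-service-account" service account and the HTTPS secret must exist before the application is deployed. A minimal sketch of that prerequisite setup and of instantiating the template with the oc client follows; the certificate file names and the local path to the template JSON are illustrative assumptions that simply mirror the template defaults, not values mandated by this commit.

    # Hypothetical prerequisite setup for jws31-tomcat7-mysql-persistent-s2i (names follow the template defaults).
    oc create serviceaccount jws-service-account
    # The secret name must match JWS_HTTPS_SECRET (default "jws-app-secret");
    # server.crt / server.key match the JWS_HTTPS_CERTIFICATE(_KEY) defaults.
    oc create secret generic jws-app-secret --from-file=server.crt --from-file=server.key
    # Instantiate the template with its default parameter values.
    oc process -f jws31-tomcat7-mysql-persistent-s2i.json | oc create -f -
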
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-s2i.json
new file mode 100644
index 000000000..2983cc905
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-mysql-s2i.json
@@ -0,0 +1,677 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MySQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
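
The ephemeral variant above wires the same BuildConfig triggers (GitHub, Generic, ImageChange, ConfigChange) as the persistent template. As a hedged usage sketch, a build can also be started and followed by hand, and the generated webhook secrets inspected; "jws-app" below is the APPLICATION_NAME default, so substitute your own value if it was overridden.

    # Kick off and follow an S2I build for the default application name.
    oc start-build jws-app --follow
    # Shows the GitHub/Generic webhook URLs together with their generated secrets.
    oc describe bc jws-app
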
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..cc5ea452c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json
@@ -0,0 +1,692 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + PostgreSQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
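
Any parameter defined in the template above can be overridden at instantiation time. A sketch using oc new-app against the persistent PostgreSQL template, with a few assumed override values chosen purely for illustration, looks like this:

    # Parameter names come from the template above; the values are examples only.
    oc new-app -f jws31-tomcat7-postgresql-persistent-s2i.json \
        -p APPLICATION_NAME=todolist \
        -p VOLUME_CAPACITY=1Gi \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5
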
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-s2i.json
new file mode 100644
index 000000000..bd23e1558
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat7-postgresql-s2i.json
@@ -0,0 +1,651 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-basic-s2i.json
new file mode 100644
index 000000000..f3a5786f6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-basic-s2i.json
@@ -0,0 +1,319 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 (no https)"
+ },
+ "name": "jws31-tomcat8-basic-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-https-s2i.json
new file mode 100644
index 000000000..634948a80
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-https-s2i.json
@@ -0,0 +1,438 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 (with https)"
+ },
+ "name": "jws31-tomcat8-https-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..1ad60d8cc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json
@@ -0,0 +1,715 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MongoDB (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-mongodb-persistent-s2i"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "labels": {
+ "template": "jws31-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-s2i.json
new file mode 100644
index 000000000..f3e918afc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mongodb-s2i.json
@@ -0,0 +1,674 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MongoDB (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat8-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json
new file mode 100644
index 000000000..08b456440
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json
@@ -0,0 +1,718 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MySQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
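
A minimal usage sketch for the persistent MySQL template above, assuming the JSON file is available locally and a reasonably recent oc client; the template and parameter names come from the JSON itself, while the parameter values are only illustrative:

    # Import the template, then instantiate it with a few overridden parameters.
    oc create -f jws31-tomcat8-mysql-persistent-s2i.json
    oc new-app --template=jws31-tomcat8-mysql-persistent-s2i \
      -p APPLICATION_NAME=jws-app \
      -p DB_DATABASE=todolist \
      -p VOLUME_CAPACITY=1Gi
    # Parameters declared with "generate": "expression" (DB_USERNAME, DB_PASSWORD,
    # JWS_ADMIN_USERNAME, ...) are filled with random values matching their "from"
    # regular expressions when left unset.
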
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-s2i.json
new file mode 100644
index 000000000..260515b73
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-mysql-s2i.json
@@ -0,0 +1,677 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MySQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat8-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
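
The template message printed on instantiation (see above) asks for a "jws-service-account" service account and an HTTPS secret; a hedged sketch of those preparation steps, assuming server.crt and server.key already exist (for example generated with openssl):

    # Names match the JWS_HTTPS_SECRET, JWS_HTTPS_CERTIFICATE and
    # JWS_HTTPS_CERTIFICATE_KEY defaults declared in the template parameters.
    oc create serviceaccount jws-service-account
    oc create secret generic jws-app-secret \
      --from-file=server.crt \
      --from-file=server.key
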
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..eef5b6939
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json
@@ -0,0 +1,692 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + PostgreSQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
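
A rough sketch of how the VOLUME_CAPACITY parameter and the PersistentVolumeClaim above fit together, assuming the template file is available locally and using illustrative values:

    # Instantiate the template straight from the file with a larger database volume.
    oc new-app -f jws31-tomcat8-postgresql-persistent-s2i.json \
      -p APPLICATION_NAME=jws-app \
      -p VOLUME_CAPACITY=2Gi
    # The resulting claim (name pattern ${APPLICATION_NAME}-postgresql-claim)
    # requests 2Gi of storage with the ReadWriteOnce access mode.
    oc get pvc jws-app-postgresql-claim
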
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-s2i.json
new file mode 100644
index 000000000..07ef7218a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/jws31-tomcat8-postgresql-s2i.json
@@ -0,0 +1,649 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + (PostgreSQL with https)"
+ },
+ "name": "jws31-tomcat8-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
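
The pod spec that closes this template runs under a "jws-service-account" service account and mounts the secret named by ${JWS_HTTPS_SECRET} at /etc/jws-secret-volume, which is where the JWS_HTTPS_CERTIFICATE and JWS_HTTPS_CERTIFICATE_KEY settings expect the certificate files to live. A minimal setup sketch, assuming an illustrative secret name "jws-app-secret" and local files "server.crt"/"server.key" (placeholders, not taken from this diff; newer clients can use "oc create secret generic" instead of "oc secrets new"):

  # service account referenced by serviceAccountName in the pod spec
  oc create serviceaccount jws-service-account
  # secret holding the certificate and key; pass its name as JWS_HTTPS_SECRET when instantiating
  oc secrets new jws-app-secret server.crt server.key
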
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json
index 143e16756..a48e204ae 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -6,13 +6,14 @@
"iconClass": "icon-jboss",
"description": "Application template for Java applications built using S2I.",
"tags": "java,xpaas",
- "version": "1.0.0"
+ "version": "1.1.0",
+ "openshift.io/display-name": "Red Hat OpenJDK 8"
},
"name": "openjdk18-web-basic-s2i"
},
"labels": {
"template": "openjdk18-web-basic-s2i",
- "xpaas": "1.0.0"
+ "xpaas": "1.4.0"
},
"message": "A new java application has been created in your project.",
"parameters": [
@@ -155,7 +156,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-openjdk18-openshift:1.0"
+ "name": "redhat-openjdk18-openshift:1.1"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
index 1dea463ac..d1705c88c 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + MySQL (Persistent with https)"
},
"name": "processserver63-amq-mysql-persistent-s2i"
},
"labels": {
"template": "processserver63-amq-mysql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,144 +54,168 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -192,6 +223,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -199,46 +231,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -246,6 +287,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -253,18 +295,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -272,6 +317,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -279,10 +325,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -306,7 +373,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -330,7 +398,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -460,13 +529,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -788,7 +865,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -919,7 +996,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
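
This revision pins the database image to mysql:${MYSQL_IMAGE_STREAM_TAG} (default 5.7) instead of mysql:latest and threads MAVEN_MIRROR_URL and ARTIFACT_DIR into the S2I build environment. An instantiation sketch; the mirror URL is a placeholder and the -p syntax assumes a reasonably recent oc client:

  oc new-app -f processserver63-amq-mysql-persistent-s2i.json \
    -p APPLICATION_NAME=kie-app \
    -p MYSQL_IMAGE_STREAM_TAG=5.7 \
    -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/
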
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-s2i.json
index 42264585b..665cb76a3 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-mysql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + MySQL (Ephemeral with https)"
},
"name": "processserver63-amq-mysql-s2i"
},
"labels": {
"template": "processserver63-amq-mysql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,132 +54,154 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -180,6 +209,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -187,46 +217,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -234,6 +273,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -241,18 +281,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -260,6 +303,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -267,10 +311,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -294,7 +359,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -318,7 +384,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -448,13 +515,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -740,7 +815,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -837,7 +912,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
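
Like the persistent variant, this template defaults AMQ_MESH_DISCOVERY_TYPE to "kube", and its new message asks for a "processserver-service-account" plus a keystore secret (defaults: HTTPS_SECRET=processserver-app-secret, HTTPS_KEYSTORE=keystore.jks). A sketch of that prerequisite setup, assuming a project named "myproject" and a local keystore.jks (both placeholders):

  # grant the 'view' role needed for 'kube' mesh discovery, as noted in the parameter description
  oc policy add-role-to-user view system:serviceaccount:myproject:default
  # service account and keystore secret referenced by the template message
  oc create serviceaccount processserver-service-account
  oc secrets new processserver-app-secret keystore.jks
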
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
index f6d0c99ed..5a395a0f3 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + PostgreSQL (Persistent with https)"
},
"name": "processserver63-amq-postgresql-persistent-s2i"
},
"labels": {
"template": "processserver63-amq-postgresql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,144 +54,168 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -192,6 +223,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -199,31 +231,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -231,6 +269,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -238,18 +277,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -257,6 +299,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -264,10 +307,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -291,7 +355,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -315,7 +380,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -445,13 +511,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -773,7 +847,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -892,7 +966,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
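
Beyond the version bumps, the http and https services in this template now carry a service.alpha.openshift.io/dependencies annotation pointing at the ${APPLICATION_NAME}-postgresql and ${APPLICATION_NAME}-amq-tcp services. One quick way to confirm the annotation after instantiating the template ("kie-app" is the default APPLICATION_NAME):

  oc get svc kie-app -o yaml | grep service.alpha.openshift.io/dependencies
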
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-s2i.json
index 41c726cf0..e7c5efdc9 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-amq-postgresql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + PostgreSQL (Ephemeral with https)"
},
"name": "processserver63-amq-postgresql-s2i"
},
"labels": {
"template": "processserver63-amq-postgresql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,132 +54,154 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -180,6 +209,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -187,31 +217,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -219,6 +255,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -226,18 +263,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -245,6 +285,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -252,10 +293,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -279,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -303,7 +366,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -433,13 +497,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -725,7 +797,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -810,7 +882,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
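
Every parameter in this file now carries a displayName alongside its description, and several values (webhook secrets, passwords) are generated from expressions such as [a-zA-Z0-9]{8}. The parameter list, including generators and defaults, can be inspected locally before creating anything; the exact output columns depend on the oc client version:

  oc process --parameters -f processserver63-amq-postgresql-s2i.json
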
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-basic-s2i.json
index 170c919cb..e70d20a6e 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-basic-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,javaee,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server (no https)"
},
"name": "processserver63-basic-s2i"
},
"labels": {
"template": "processserver63-basic-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,54 +40,63 @@
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.H2Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -190,13 +220,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-persistent-s2i.json
index 89d0db1a6..f76b07b0b 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + MySQL (Persistent with https)"
},
"name": "processserver63-mysql-persistent-s2i"
},
"labels": {
"template": "processserver63-mysql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,108 +54,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,46 +189,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -210,6 +245,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -217,6 +253,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -381,13 +441,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -681,7 +749,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-s2i.json
index 26cab29f8..a3be02eab 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-mysql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + MySQL (Ephemeral with https)"
},
"name": "processserver63-mysql-s2i"
},
"labels": {
"template": "processserver63-mysql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,102 +54,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -150,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -157,46 +182,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -204,6 +238,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -211,6 +246,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -218,10 +254,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -245,7 +302,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -269,7 +327,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -375,13 +434,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -639,7 +706,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-persistent-s2i.json
index 32a512829..361b177f9 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + PostgreSQL (Persistent with https)"
},
"name": "processserver63-postgresql-persistent-s2i"
},
"labels": {
"template": "processserver63-postgresql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,108 +54,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,31 +189,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,6 +227,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -202,6 +235,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -366,13 +423,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -666,7 +731,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-s2i.json
index 55e2199bb..451915a1d 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver63-postgresql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + PostgreSQL (Ephemeral with https)"
},
"name": "processserver63-postgresql-s2i"
},
"labels": {
"template": "processserver63-postgresql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,102 +54,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -150,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -157,31 +182,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -189,6 +220,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -196,6 +228,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -203,10 +236,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -230,7 +284,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -254,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,13 +416,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -624,7 +688,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json
new file mode 100644
index 000000000..293d04d63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json
@@ -0,0 +1,1156 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + MySQL (Persistent with https)"
+ },
+ "name": "processserver64-amq-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
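The parameters in the template above that carry "generate": "expression" (for example DB_PASSWORD with "[a-zA-Z0-9]{8}", or KIE_SERVER_PASSWORD with "[a-zA-Z]{6}[0-9]{1}!") are expanded into random values when the template is instantiated. The following is only a minimal Python sketch of that idea, assuming a simplified subset of the expression syntax; it is not the OpenShift template processor.

# Hypothetical sketch (assumption, not the actual generator): expand a simplified
# "[class]{n}" expression such as "user[a-zA-Z0-9]{3}" or "[a-zA-Z]{6}[0-9]{1}!".
import re
import secrets
import string

# Character classes seen in the templates above; anything else falls back to alphanumerics.
_CLASSES = {
    "a-zA-Z0-9": string.ascii_letters + string.digits,
    "a-zA-Z": string.ascii_letters,
    "0-9": string.digits,
}

_TOKEN = re.compile(r"\[([^\]]+)\]\{(\d+)\}")

def expand(expression: str) -> str:
    """Replace each [class]{n} token with n random characters; keep literal text as-is."""
    out = []
    pos = 0
    for match in _TOKEN.finditer(expression):
        out.append(expression[pos:match.start()])  # literal prefix, e.g. "user"
        alphabet = _CLASSES.get(match.group(1), string.ascii_letters + string.digits)
        out.append("".join(secrets.choice(alphabet) for _ in range(int(match.group(2)))))
        pos = match.end()
    out.append(expression[pos:])  # trailing literal, e.g. "!"
    return "".join(out)

if __name__ == "__main__":
    print(expand("user[a-zA-Z0-9]{3}"))    # e.g. "userX4q"
    print(expand("[a-zA-Z]{6}[0-9]{1}!"))  # e.g. "kQwErT7!"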
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-s2i.json
new file mode 100644
index 000000000..760940b36
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-mysql-s2i.json
@@ -0,0 +1,1034 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + MySQL (Ephemeral with https)"
+ },
+ "name": "processserver64-amq-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
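Every resource under "objects" in these templates refers back to the parameter list through ${NAME} placeholders, for example "${APPLICATION_NAME}-amq-tcp" or "mysql:${MYSQL_IMAGE_STREAM_TAG}". The following minimal Python sketch illustrates that substitution step under the assumption of plain string replacement over the parsed JSON; it is not the actual `oc process` implementation.

# Hypothetical sketch (assumption): resolve ${PARAM} references in a parsed template object.
import json
import re

_REF = re.compile(r"\$\{([A-Za-z0-9_]+)\}")

def substitute(node, values):
    """Walk dicts/lists/strings and replace ${NAME} with values[NAME]; leave unknowns intact."""
    if isinstance(node, dict):
        return {key: substitute(val, values) for key, val in node.items()}
    if isinstance(node, list):
        return [substitute(val, values) for val in node]
    if isinstance(node, str):
        return _REF.sub(lambda m: values.get(m.group(1), m.group(0)), node)
    return node

if __name__ == "__main__":
    snippet = json.loads('{"metadata": {"name": "${APPLICATION_NAME}-amq-tcp"}}')
    print(substitute(snippet, {"APPLICATION_NAME": "kie-app"}))
    # {'metadata': {'name': 'kie-app-amq-tcp'}}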
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..1603bccff
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json
@@ -0,0 +1,1126 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + PostgreSQL (Persistent with https)"
+ },
+ "name": "processserver64-amq-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
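The persistent template above ends here; its message text asks for the "processserver-service-account" service account and the keystore secret named by HTTPS_SECRET to exist before the objects are created. A minimal sketch of those prerequisite steps with the oc client, assuming a locally generated keystore.jks in the current directory and the template's default parameter values:

    # Service account referenced by the DeploymentConfig's serviceAccountName.
    oc create serviceaccount processserver-service-account
    # Secret mounted at /etc/processserver-secret-volume; the name matches the
    # HTTPS_SECRET default, and keystore.jks is assumed to exist locally.
    oc create secret generic processserver-app-secret --from-file=keystore.jks
    # Optional: make the secret mountable by the service account.
    oc secrets link processserver-service-account processserver-app-secret
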
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-s2i.json
new file mode 100644
index 000000000..422f51c11
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-amq-postgresql-s2i.json
@@ -0,0 +1,1004 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "processserver64-amq-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
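The ephemeral A-MQ/PostgreSQL variant keeps AMQ_MESH_DISCOVERY_TYPE at its "kube" default, so the pod's default service account needs the 'view' role noted in that parameter's description. A short sketch of granting the role and instantiating the template; the namespace placeholder and the parameter overrides shown are illustrative only:

    # From the AMQ_MESH_DISCOVERY_TYPE description: required for 'kube' discovery.
    # Replace <namespace> with the project the template is instantiated in.
    oc policy add-role-to-user view system:serviceaccount:<namespace>:default
    # Create the template objects; the -p values shown are the defaults and may be omitted.
    oc process -f processserver64-amq-postgresql-s2i.json \
      -p APPLICATION_NAME=kie-app \
      -p IMAGE_STREAM_NAMESPACE=openshift \
      | oc create -f -
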
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-basic-s2i.json
new file mode 100644
index 000000000..2bf15ff25
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-basic-s2i.json
@@ -0,0 +1,383 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server (no https)"
+ },
+ "name": "processserver64-basic-s2i"
+ },
+ "labels": {
+ "template": "processserver64-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.H2Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
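The basic template exposes only the http route and secures the KIE Server REST/JMS interface with KIE_SERVER_USER and KIE_SERVER_PASSWORD, as its message text states. A hedged smoke test against the server info endpoint; the route host and the /kie-server/services/rest/server path are assumptions based on the usual layout of this image, not something the template itself defines:

    # Replace the host with the route created for ${APPLICATION_NAME} and supply the
    # generated KIE_SERVER_PASSWORD; the REST context path is assumed, not guaranteed.
    curl -u kieserver:<KIE_SERVER_PASSWORD> \
      http://kie-app-<project>.<default-domain-suffix>/kie-server/services/rest/server
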
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-persistent-s2i.json
new file mode 100644
index 000000000..4673dfb0d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-persistent-s2i.json
@@ -0,0 +1,860 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + MySQL (Persistent with https)"
+ },
+ "name": "processserver64-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
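
A minimal sketch of instantiating the persistent MySQL template above with the oc client, assuming a local checkout of the JSON file and a project that already contains the "processserver-service-account" service account and the keystore secret; any parameter not overridden keeps its default or its generated value ("generate": "expression"):

    # Upload the template into the shared openshift namespace (optional if the
    # openshift_examples role has already imported it)
    oc create -f processserver64-mysql-persistent-s2i.json -n openshift

    # Instantiate directly from the file; -p overrides individual parameters
    oc new-app -f processserver64-mysql-persistent-s2i.json \
        -p APPLICATION_NAME=kie-app -p VOLUME_CAPACITY=1Gi
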
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-s2i.json
new file mode 100644
index 000000000..9078f20b8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-mysql-s2i.json
@@ -0,0 +1,783 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + MySQL (Ephemeral with https)"
+ },
+ "name": "processserver64-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
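
Before any of these processserver templates deploy cleanly, the service account and keystore secret named in their "message" text must already exist in the project. A rough sketch of those prerequisites, assuming the parameter defaults (secret processserver-app-secret, file keystore.jks, certificate alias jboss, password mykeystorepass) and a self-signed certificate suitable only for a test project:

    # Service account referenced by serviceAccountName in the DeploymentConfig
    oc create serviceaccount processserver-service-account

    # Self-signed keystore matching the HTTPS_* parameter defaults
    keytool -genkeypair -alias jboss -keyalg RSA -keysize 2048 \
        -keystore keystore.jks -storepass mykeystorepass -keypass mykeystorepass \
        -dname "CN=kie-app.example.com"

    # Secret mounted through the processserver-keystore-volume secret volume
    # (newer clients can use: oc create secret generic ... --from-file=keystore.jks)
    oc secrets new processserver-app-secret keystore.jks
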
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..75b6d310e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-persistent-s2i.json
@@ -0,0 +1,830 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + PostgreSQL (Persistent with https)"
+ },
+ "name": "processserver64-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
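
After instantiating the persistent PostgreSQL variant, the claim defined at the end of its object list has to bind before the database pod can start. A short verification sketch, assuming the default APPLICATION_NAME of kie-app:

    # The PVC created from ${APPLICATION_NAME}-postgresql-claim must reach Bound
    oc get pvc kie-app-postgresql-claim

    # Both DeploymentConfigs should then roll out
    oc rollout status dc/kie-app-postgresql
    oc rollout status dc/kie-app
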
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-s2i.json
new file mode 100644
index 000000000..51923c0ad
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/processserver64-postgresql-s2i.json
@@ -0,0 +1,753 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "processserver64-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-https.json
index fb0578a67..5e956f449 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-https.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-https.json
@@ -5,110 +5,129 @@
"annotations": {
"description": "Application template for SSO 7.0",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,java,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
},
"name": "sso70-https"
},
"labels": {
"template": "sso70-https",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -116,54 +135,65 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
@@ -283,10 +313,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql-persistent.json
index dcbb24bf1..0fb2703c7 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql-persistent.json
@@ -5,123 +5,145 @@
"annotations": {
"description": "Application template for SSO 7.0 MySQL applications with persistent storage",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + MySQL (Persistent)"
},
"name": "sso70-mysql-persistent"
},
"labels": {
"template": "sso70-mysql-persistent",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -129,6 +151,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -136,36 +159,42 @@
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,58 +202,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -248,7 +295,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +320,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -364,10 +413,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -639,7 +688,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql.json
index 1768f7a1b..9beae806b 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-mysql.json
@@ -5,123 +5,145 @@
"annotations": {
"description": "Application template for SSO 7.0 MySQL applications",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + MySQL (Ephemeral)"
},
"name": "sso70-mysql"
},
"labels": {
"template": "sso70-mysql",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -129,6 +151,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -136,30 +159,35 @@
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -167,58 +195,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -240,10 +286,11 @@
"name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}",
- "component": "server"
+ "component": "server"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -268,7 +315,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,7 +397,7 @@
"name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}",
- "component": "server"
+ "component": "server"
}
},
"spec": {
@@ -364,10 +412,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -641,7 +689,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql-persistent.json
index 4c2f81f2e..e22399351 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql-persistent.json
@@ -5,108 +5,127 @@
"annotations": {
"description": "Application template for SSO 7.0 PostgreSQL applications with persistent storage",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + PostgreSQL (Persistent)"
},
"name": "sso70-postgresql-persistent"
},
"labels": {
"template": "sso70-postgresql-persistent",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -114,6 +133,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +141,42 @@
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,58 +184,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -233,7 +277,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -257,7 +302,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,10 +395,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -624,7 +670,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql.json
index d8402ef72..aa8ebaa8e 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso70-postgresql.json
@@ -5,108 +5,127 @@
"annotations": {
"description": "Application template for SSO 7.0 PostgreSQL applications",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + PostgreSQL (Ephemeral)"
},
"name": "sso70-postgresql"
},
"labels": {
"template": "sso70-postgresql",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -114,6 +133,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -121,30 +141,35 @@
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -152,58 +177,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -228,7 +271,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -253,7 +297,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,10 +394,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -626,7 +671,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-https.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-https.json
new file mode 100644
index 000000000..bee86d7c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-https.json
@@ -0,0 +1,544 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "name": "sso71-https"
+ },
+ "labels": {
+ "template": "sso71-https",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql-persistent.json
new file mode 100644
index 000000000..49b37f348
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql-persistent.json
@@ -0,0 +1,799 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 MySQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + MySQL (Persistent)"
+ },
+ "name": "sso71-mysql-persistent"
+ },
+ "labels": {
+ "template": "sso71-mysql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql.json
new file mode 100644
index 000000000..634a75bab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-mysql.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 MySQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + MySQL (Ephemeral)"
+ },
+ "name": "sso71-mysql"
+ },
+ "labels": {
+ "template": "sso71-mysql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql-persistent.json
new file mode 100644
index 000000000..c53bb9d5b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql-persistent.json
@@ -0,0 +1,773 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 PostgreSQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + PostgreSQL (Persistent)"
+ },
+ "name": "sso71-postgresql-persistent"
+ },
+ "labels": {
+ "template": "sso71-postgresql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql.json
new file mode 100644
index 000000000..c1fc41eda
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/sso71-postgresql.json
@@ -0,0 +1,741 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 PostgreSQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + PostgreSQL (Ephemeral)"
+ },
+ "name": "sso71-postgresql"
+ },
+ "labels": {
+ "template": "sso71-postgresql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml
deleted file mode 100644
index 14bdd1dca..000000000
--- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: cloudforms
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes-app
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml
new file mode 100644
index 000000000..250a99b8d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv01
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv01
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml
deleted file mode 100644
index 709d8d976..000000000
--- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: nfs-pv01
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml
new file mode 100644
index 000000000..cba9bbe35
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv02
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv02
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml
new file mode 100644
index 000000000..c08c21265
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv03
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
index 4f25a9c8f..3bc6c5813 100644
--- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
@@ -17,6 +17,7 @@ objects:
service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
name: ${NAME}
spec:
+ clusterIP: None
ports:
- name: http
port: 80
@@ -48,11 +49,27 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms app image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+ dockerImageRepository: "${APPLICATION_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: "${MEMCACHED_IMG_NAME}"
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${DATABASE_SERVICE_NAME}
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- ReadWriteOnce
@@ -62,45 +79,41 @@ objects:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${NAME}
+ name: "${NAME}-region"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
- storage: ${APPLICATION_VOLUME_CAPACITY}
-- apiVersion: v1
- kind: "DeploymentConfig"
+ storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+- apiVersion: apps/v1beta1
+ kind: "StatefulSet"
metadata:
name: ${NAME}
annotations:
description: "Defines how to deploy the CloudForms appliance"
spec:
+ serviceName: "${NAME}"
+ replicas: 1
template:
metadata:
labels:
name: ${NAME}
name: ${NAME}
spec:
- volumes:
- -
- name: "cfme-app-volume"
- persistentVolumeClaim:
- claimName: ${NAME}
containers:
- - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
- imagePullPolicy: IfNotPresent
- name: cloudforms
+ - name: cloudforms
+ image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
livenessProbe:
- httpGet:
- path: /
- port: 80
+ tcpSocket:
+ port: 443
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
httpGet:
path: /
- port: 80
+ port: 443
+ scheme: HTTPS
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
@@ -112,8 +125,11 @@ objects:
privileged: true
volumeMounts:
-
- name: "cfme-app-volume"
+ name: "${NAME}-server"
mountPath: "/persistent"
+ -
+ name: "${NAME}-region"
+ mountPath: "/persistent-region"
env:
-
name: "APPLICATION_INIT_DELAY"
@@ -144,29 +160,32 @@ objects:
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
requests:
- memory: "${MEMORY_APPLICATION_MIN}"
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
lifecycle:
preStop:
exec:
command:
- /opt/rh/cfme-container-scripts/sync-pv-data
- replicas: 1
- selector:
- name: ${NAME}
- triggers:
- - type: "ConfigChange"
- - type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "cloudforms"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
- strategy:
- type: "Recreate"
- recreateParams:
- timeoutSeconds: 1200
+ volumes:
+ -
+ name: "${NAME}-region"
+ persistentVolumeClaim:
+ claimName: ${NAME}-region
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ # Uncomment this if using dynamic volume provisioning.
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
+ # volume.alpha.kubernetes.io/storage-class: anything
+ spec:
+ accessModes: [ ReadWriteOnce ]
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -182,14 +201,6 @@ objects:
selector:
name: "${MEMCACHED_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-memcached
- annotations:
- description: "Keeps track of changes in the CloudForms memcached image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
@@ -223,7 +234,7 @@ objects:
containers:
-
name: "memcached"
- image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
ports:
-
containerPort: 11211
@@ -249,8 +260,11 @@ objects:
name: "MEMCACHED_SLAB_PAGE_SIZE"
value: "${MEMCACHED_SLAB_PAGE_SIZE}"
resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
limits:
- memory: "${MEMORY_MEMCACHED_LIMIT}"
+ memory: "${MEMCACHED_MEM_LIMIT}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -266,14 +280,6 @@ objects:
selector:
name: "${DATABASE_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-postgresql
- annotations:
- description: "Keeps track of changes in the CloudForms postgresql image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${DATABASE_SERVICE_NAME}"
@@ -307,11 +313,11 @@ objects:
-
name: "cfme-pgdb-volume"
persistentVolumeClaim:
- claimName: ${DATABASE_SERVICE_NAME}
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
containers:
-
name: "postgresql"
- image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
ports:
-
containerPort: 5432
@@ -350,8 +356,11 @@ objects:
name: "POSTGRESQL_SHARED_BUFFERS"
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
limits:
- memory: "${MEMORY_POSTGRESQL_LIMIT}"
+ memory: "${POSTGRESQL_MEM_LIMIT}"
parameters:
-
@@ -420,36 +429,87 @@ parameters:
name: "POSTGRESQL_SHARED_BUFFERS"
displayName: "PostgreSQL Shared Buffer Amount"
description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "64MB"
+ value: "256MB"
-
- name: "MEMORY_APPLICATION_MIN"
- displayName: "Application Memory Minimum"
+ name: "APPLICATION_CPU_REQ"
+ displayName: "Application Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
+ value: "1000m"
+ -
+ name: "POSTGRESQL_CPU_REQ"
+ displayName: "PostgreSQL Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
+ value: "500m"
+ -
+ name: "MEMCACHED_CPU_REQ"
+ displayName: "Memcached Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
+ value: "200m"
+ -
+ name: "APPLICATION_MEM_REQ"
+ displayName: "Application Min RAM Requested"
required: true
description: "Minimum amount of memory the Application container will need."
- value: "4096Mi"
+ value: "6144Mi"
+ -
+ name: "POSTGRESQL_MEM_REQ"
+ displayName: "PostgreSQL Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the PostgreSQL container will need."
+ value: "1024Mi"
-
- name: "MEMORY_POSTGRESQL_LIMIT"
- displayName: "PostgreSQL Memory Limit"
+ name: "MEMCACHED_MEM_REQ"
+ displayName: "Memcached Min RAM Requested"
required: true
- description: "Maximum amount of memory the PostgreSQL container can use."
- value: "2048Mi"
+ description: "Minimum amount of memory the Memcached container will need."
+ value: "64Mi"
-
- name: "MEMORY_MEMCACHED_LIMIT"
- displayName: "Memcached Memory Limit"
+ name: "APPLICATION_MEM_LIMIT"
+ displayName: "Application Max RAM Limit"
required: true
- description: "Maximum amount of memory the Memcached container can use."
+ description: "Maximum amount of memory the Application container can consume."
+ value: "16384Mi"
+ -
+ name: "POSTGRESQL_MEM_LIMIT"
+ displayName: "PostgreSQL Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can consume."
+ value: "8192Mi"
+ -
+ name: "MEMCACHED_MEM_LIMIT"
+ displayName: "Memcached Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can consume."
value: "256Mi"
-
+ name: "POSTGRESQL_IMG_NAME"
+ displayName: "PostgreSQL Image Name"
+ description: "This is the PostgreSQL image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql"
+ -
name: "POSTGRESQL_IMG_TAG"
displayName: "PostgreSQL Image Tag"
description: "This is the PostgreSQL image tag/version requested to deploy."
value: "latest"
-
+ name: "MEMCACHED_IMG_NAME"
+ displayName: "Memcached Image Name"
+ description: "This is the Memcached image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached"
+ -
name: "MEMCACHED_IMG_TAG"
displayName: "Memcached Image Tag"
description: "This is the Memcached image tag/version requested to deploy."
value: "latest"
-
+ name: "APPLICATION_IMG_NAME"
+ displayName: "Application Image Name"
+ description: "This is the Application image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app"
+ -
name: "APPLICATION_IMG_TAG"
displayName: "Application Image Tag"
description: "This is the Application image tag/version requested to deploy."
@@ -464,16 +524,22 @@ parameters:
displayName: "Application Init Delay"
required: true
description: "Delay in seconds before we attempt to initialize the application."
- value: "30"
+ value: "15"
-
name: "APPLICATION_VOLUME_CAPACITY"
displayName: "Application Volume Capacity"
required: true
description: "Volume space available for application data."
- value: "1Gi"
+ value: "5Gi"
+ -
+ name: "APPLICATION_REGION_VOLUME_CAPACITY"
+ displayName: "Application Region Volume Capacity"
+ required: true
+ description: "Volume space available for region application data."
+ value: "5Gi"
-
name: "DATABASE_VOLUME_CAPACITY"
displayName: "Database Volume Capacity"
required: true
description: "Volume space available for database."
- value: "1Gi"
+ value: "15Gi"
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..bbc0c7044
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: What docker image should be used for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: What docker image should be used for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: A public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: Under this hostname the Hawkular Services will be accessible; if left blank, a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the Cassandra node
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
index f347f1f9f..536f7275e 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
index 6ed744777..3b7fdccce 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
index 97a8abf6d..ee274194f 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
index 0656219fb..e5ba43669 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
index d60b4647d..969e62ac5 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -36,7 +41,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
index c2bfa40fd..4f39d41a5 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
index 7a16e742a..c37102cb0 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
index 242212d6f..32dc93a95 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
index e9af50937..6bb683e52 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
@@ -24,7 +24,10 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-password" : "${REDIS_PASSWORD}"
@@ -35,7 +38,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
index aa27578a9..9e8be2309 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
@@ -24,7 +24,10 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-password" : "${REDIS_PASSWORD}"
@@ -35,7 +38,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
index 1a90a9409..6cef21945 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/httpd-24-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
@@ -103,7 +148,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +182,22 @@
"kind": "DockerImage",
"name": "centos/nodejs-4-centos7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-6-centos7:latest"
+ }
}
]
}
@@ -407,7 +468,7 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "ImageStreamTag",
@@ -423,7 +484,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
"version": "8.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -439,7 +500,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
"version": "9.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -455,7 +516,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
"version": "10.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -471,7 +532,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
"version": "10.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -800,7 +861,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
index eb94c3bb4..abdae01e3 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/httpd-24-rhel7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
@@ -103,7 +148,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +182,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest"
+ }
}
]
}
@@ -253,7 +314,7 @@
"tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
- "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"kind": "DockerImage",
@@ -707,7 +768,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
index f48d8d4a8..6d2ccbf7f 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
@@ -17,6 +17,7 @@ instantiating them.
* [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex).
* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
* [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. For more information see the [source repository](https://github.com/openshift/httpd-ex).
* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/amp.yml b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/amp.yml
new file mode 100644
index 000000000..4e469f6e8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/amp.yml
@@ -0,0 +1,1261 @@
+base_env: &base_env
+- name: RAILS_ENV
+ value: "production"
+- name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+- name: FORCE_SSL
+ value: "true"
+- name: THREESCALE_SUPERDOMAIN
+ value: "${WILDCARD_DOMAIN}"
+- name: TENANT_NAME
+ value: "${TENANT_NAME}"
+- name: APICAST_ACCESS_TOKEN
+ value: "${APICAST_ACCESS_TOKEN}"
+- name: ADMIN_ACCESS_TOKEN
+ value: "${ADMIN_ACCESS_TOKEN}"
+- name: PROVIDER_PLAN
+ value: 'enterprise'
+- name: USER_LOGIN
+ value: "${ADMIN_USERNAME}"
+- name: USER_PASSWORD
+ value: "${ADMIN_PASSWORD}"
+- name: RAILS_LOG_TO_STDOUT
+ value: "true"
+- name: RAILS_LOG_LEVEL
+ value: "info"
+- name: THINKING_SPHINX_ADDRESS
+ value: "system-sphinx"
+- name: THINKING_SPHINX_PORT
+ value: "9306"
+- name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "/tmp/sphinx.conf"
+- name: EVENTS_SHARED_SECRET
+ value: "${SYSTEM_BACKEND_SHARED_SECRET}"
+- name: THREESCALE_SANDBOX_PROXY_OPENSSL_VERIFY_MODE
+ value: "VERIFY_NONE"
+- name: APICAST_BACKEND_ROOT_ENDPOINT
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+- name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+- name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+- name: SECRET_KEY_BASE
+ value: "${SYSTEM_APP_SECRET_KEY_BASE}"
+- name: AMP_RELEASE
+ value: "${AMP_RELEASE}"
+- name: SMTP_ADDRESS
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: address
+- name: SMTP_USER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: username
+- name: SMTP_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: password
+- name: SMTP_DOMAIN
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: domain
+- name: SMTP_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: port
+- name: SMTP_AUTHENTICATION
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: authentication
+- name: SMTP_OPENSSL_VERIFY_MODE
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: openssl.verify.mode
+- name: BACKEND_ROUTE
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: "system"
+message: "Login on https://${TENANT_NAME}-admin.${WILDCARD_DOMAIN} as ${ADMIN_USERNAME}/${ADMIN_PASSWORD}"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-cron
+ spec:
+ replicas: 1
+ selector:
+ name: backend-cron
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-cron
+ spec:
+ containers:
+ - args:
+ - backend-cron
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-cron
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-redis
+ spec:
+ replicas: 1
+ selector:
+ name: backend-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: backend-redis
+ spec:
+ containers:
+ - image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: backend-redis
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ volumeMounts:
+ - name: backend-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ volumes:
+ - name: backend-redis-storage
+ persistentVolumeClaim:
+ claimName: backend-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-listener
+ spec:
+ replicas: 1
+ selector:
+ name: backend-listener
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-listener
+ spec:
+ containers:
+ - args:
+ - 3scale_backend
+ - start
+ - "-e"
+ - production
+ - "-p"
+ - '3000'
+ - "-x"
+ - "/dev/stdout"
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+ - name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-listener
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3000
+ readinessProbe:
+ httpGet:
+ path: "/status"
+ port: 3000
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ selector:
+ name: backend-redis
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-listener
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: 3000
+ name: http
+ selector:
+ name: backend-listener
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-provider
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: provider
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-developer
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: developer
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-worker
+ spec:
+ replicas: 1
+ selector:
+ name: backend-worker
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-worker
+ spec:
+ containers:
+ - args:
+ - 3scale_backend_worker
+ - run
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_EVENTS_HOOK
+ value: http://system-provider:3000/master/events/import
+ - name: CONFIG_EVENTS_HOOK_SHARED_SECRET
+ value: ${SYSTEM_BACKEND_SHARED_SECRET}
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-worker
+ triggers:
+ - type: ConfigChange
+
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ ports:
+ - name: system-mysql
+ protocol: TCP
+ port: 3306
+ targetPort: 3306
+ nodePort: 0
+ selector:
+ name: 'system-mysql'
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ name: redis
+ selector:
+ name: system-redis
+
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-redis
+ spec:
+ replicas: 1
+ selector:
+ name: system-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: system-redis
+ spec:
+ containers:
+ - args:
+ image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: system-redis
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - name: system-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ volumes:
+ - name: system-redis-storage
+ persistentVolumeClaim:
+ claimName: system-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-sphinx
+ spec:
+ ports:
+ - port: 9306
+ protocol: TCP
+ targetPort: 9306
+ name: sphinx
+ selector:
+ name: system-sphinx
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sphinx
+ spec:
+ replicas: 1
+ selector:
+ name: system-sphinx
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sphinx
+ spec:
+ volumes:
+ - name: system-sphinx-database
+ emptyDir: {}
+ containers:
+ - args:
+ - rake
+ - 'openshift:thinking_sphinx:start'
+ volumeMounts:
+ - name: system-sphinx-database
+ mountPath: "/opt/system/db/sphinx"
+ env:
+ - name: RAILS_ENV
+ value: production
+ - name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+ - name: THINKING_SPHINX_ADDRESS
+ value: 0.0.0.0
+ - name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "db/sphinx/production.conf"
+ - name: THINKING_SPHINX_PID_FILE
+ value: db/sphinx/searchd.pid
+ - name: DELTA_INDEX_INTERVAL
+ value: '5'
+ - name: FULL_REINDEX_INTERVAL
+ value: '60'
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sphinx
+ livenessProbe:
+ tcpSocket:
+ port: 9306
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-memcache
+ spec:
+ ports:
+ - port: 11211
+ protocol: TCP
+ targetPort: 11211
+ name: memcache
+ selector:
+ name: system-memcache
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-memcache
+ spec:
+ replicas: 1
+ selector:
+ name: system-memcache
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-memcache
+ spec:
+ containers:
+ - args:
+ env:
+ image: 3scale-amp20/memcached:1.4.15-7
+ imagePullPolicy: IfNotPresent
+ name: memcache
+ readinessProbe:
+ exec:
+ command:
+ - "sh"
+ - "-c"
+ - "echo version | nc $HOSTNAME 11211 | grep VERSION"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 11211
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ command:
+ - "memcached"
+ - "-m"
+ - "64"
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-provider-admin-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}-admin.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-provider
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: backend-route
+ labels:
+ app: system-route
+ spec:
+ host: backend-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: backend-listener
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-developer-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-developer
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-staging
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-staging
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-staging
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: http://${APICAST_ACCESS_TOKEN}@system-provider:3000
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "sandbox"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/2"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-staging
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-staging
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-staging
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-production
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-production
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-production
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: "http://${APICAST_ACCESS_TOKEN}@system-provider:3000"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "boot"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "300"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "production"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/1"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-production
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-production
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-production
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-staging-route
+ labels:
+ app: apicast-staging
+ spec:
+ host: api-${TENANT_NAME}-apicast-staging.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-staging
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-production-route
+ labels:
+ app: apicast-production
+ spec:
+ host: api-${TENANT_NAME}-apicast-production.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-production
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-app
+ spec:
+ replicas: 1
+ selector:
+ name: system-app
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ pre:
+ failurePolicy: Retry
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:deploy
+ env: *base_env
+ volumes:
+ - system-storage
+ post:
+ failurePolicy: Abort
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:post_deploy
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-app
+ spec:
+ containers:
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ command: ['env', 'TENANT_MODE=provider', 'PORT=3000', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ name: system-provider
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: provider
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: provider
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ name: provider
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ command: ['env', 'TENANT_MODE=developer', 'PORT=3001', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ imagePullPolicy: IfNotPresent
+ name: system-developer
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: developer
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: developer
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3001
+ protocol: TCP
+ name: developer
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ readOnly: true
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-resque
+ spec:
+ replicas: 1
+ selector:
+ name: system-resque
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-resque
+ spec:
+ containers:
+ - args:
+ - 'rake'
+ - 'resque:work'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-resque
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ - 'rake'
+ - 'resque:scheduler'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-scheduler
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sidekiq
+ spec:
+ replicas: 1
+ selector:
+ name: system-sidekiq
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sidekiq
+ spec:
+ containers:
+ - args:
+ - rake
+ - sidekiq:worker
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sidekiq
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: 'system-mysql'
+ template:
+ metadata:
+ labels:
+ name: 'system-mysql'
+ spec:
+ containers:
+ - name: system-mysql
+ image: ${MYSQL_IMAGE}
+ ports:
+ - containerPort: 3306
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ readinessProbe:
+ timeoutSeconds: 5
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ exec:
+ command:
+ - /bin/sh
+ - '-i'
+ - '-c'
+ - MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3306
+ env:
+ - name: MYSQL_USER
+ value: ${MYSQL_USER}
+ - name: MYSQL_PASSWORD
+ value: ${MYSQL_PASSWORD}
+ - name: MYSQL_DATABASE
+ value: ${MYSQL_DATABASE}
+ - name: MYSQL_ROOT_PASSWORD
+ value: ${MYSQL_ROOT_PASSWORD}
+ - name: MYSQL_LOWER_CASE_TABLE_NAMES
+ value: "1"
+ volumeMounts:
+ - name: 'mysql-storage'
+ mountPath: /var/lib/mysql/data
+ imagePullPolicy: IfNotPresent
+ volumes:
+ - name: 'mysql-storage'
+ persistentVolumeClaim:
+ claimName: 'mysql-storage'
+- kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: redis-config
+ data:
+ redis.conf: |
+ protected-mode no
+
+ port 6379
+
+ timeout 0
+ tcp-keepalive 300
+
+ daemonize no
+ supervised no
+
+ loglevel notice
+
+ databases 16
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ stop-writes-on-bgsave-error yes
+
+ rdbcompression yes
+ rdbchecksum yes
+
+ dbfilename dump.rdb
+
+ slave-serve-stale-data yes
+ slave-read-only yes
+
+ repl-diskless-sync no
+ repl-disable-tcp-nodelay no
+
+ appendonly yes
+ appendfilename "appendonly.aof"
+ appendfsync everysec
+ no-appendfsync-on-rewrite no
+ auto-aof-rewrite-percentage 100
+ auto-aof-rewrite-min-size 64mb
+ aof-load-truncated yes
+
+ lua-time-limit 5000
+
+ activerehashing no
+
+ aof-rewrite-incremental-fsync yes
+ dir /var/lib/redis/data
+
+- kind: ConfigMap
+
+ apiVersion: v1
+ metadata:
+ name: smtp
+ data:
+ address: ""
+ username: ""
+ password: ""
+ domain: ""
+ port: ""
+ authentication: ""
+ openssl.verify.mode: ""
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: ADMIN_PASSWORD
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+- name: ADMIN_USERNAME
+ value: admin
+ required: true
+- name: APICAST_ACCESS_TOKEN
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+ description: "Read Only Access Token that is APIcast going to use to download its configuration."
+- name: ADMIN_ACCESS_TOKEN
+ required: false
+ generate: expression
+ from: "[a-z0-9]{16}"
+ description: "Admin Access Token with all scopes and write permissions for API access."
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. Eg. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Tenant name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
+- name: MYSQL_USER
+ displayName: MySQL User
+ description: Username for MySQL user that will be used for accessing the database.
+ value: "mysql"
+ required: true
+- name: MYSQL_PASSWORD
+ displayName: MySQL Password
+ description: Password for the MySQL user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: MYSQL_DATABASE
+ displayName: MySQL Database Name
+ description: Name of the MySQL database accessed.
+ value: "system"
+ required: true
+- name: MYSQL_ROOT_PASSWORD
+ displayName: MySQL Root password.
+ description: Password for Root user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_BACKEND_USERNAME
+ description: Internal 3scale API username for internal 3scale api auth.
+ value: "3scale_api_user"
+ required: true
+- name: SYSTEM_BACKEND_PASSWORD
+ description: Internal 3scale API password for internal 3scale api auth.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: REDIS_IMAGE
+ description: Redis image to use
+ required: true
+ value: rhscl/redis-32-rhel7:3.2-5.7
+- name: MYSQL_IMAGE
+ description: Mysql image to use
+ required: true
+ value: rhscl/mysql-56-rhel7:5.6-13.14
+- name: SYSTEM_BACKEND_SHARED_SECRET
+ description: Shared secret to import events from backend to system.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_APP_SECRET_KEY_BASE
+ description: System application secret key base
+ generate: expression
+ from: "[a-f0-9]{128}"
+ required: true
+- name: APICAST_MANAGEMENT_API
+ description: "Scope of the APIcast Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: APICAST_OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification when downloading the configuration. Can be set to true/false."
+ required: false
+ value: "false"
+- name: APICAST_RESPONSE_CODES
+ description: "Enable logging response codes in APIcast."
+ value: "true"
+ required: false
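
The amp.yml template added above drives everything through its parameters block: several values are auto-generated (generate: expression), others carry defaults, and a few, such as WILDCARD_DOMAIN, must be supplied when the template is processed. Below is a minimal sketch, assuming PyYAML is available and the template has been saved locally as amp.yml (a hypothetical path), that summarizes which parameters fall into each group:

    # Minimal sketch: classify the template's parameters. Assumes PyYAML is
    # installed and a local copy of the template saved as amp.yml.
    import yaml

    with open('amp.yml') as f:
        template = yaml.safe_load(f)

    for param in template.get('parameters', []):
        if 'generate' in param:
            source = 'generated from %s' % param['from']
        elif 'value' in param:
            source = 'defaults to %r' % param['value']
        else:
            source = 'must be supplied by the caller'
        required = 'required' if param.get('required') else 'optional'
        print('%-28s %-9s %s' % (param['name'], required, source))
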
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml
deleted file mode 100644
index 34f5fcbcc..000000000
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml
+++ /dev/null
@@ -1,149 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: 3scale-gateway
- annotations:
- description: "3scale API Gateway"
- iconClass: "icon-load-balancer"
- tags: "api,gateway,3scale"
-objects:
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- replicas: 2
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 25%
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- labels:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- spec:
- containers:
- - env:
- - name: THREESCALE_PORTAL_ENDPOINT
- valueFrom:
- secretKeyRef:
- name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
- key: password
- - name: THREESCALE_CONFIG_FILE
- value: ${THREESCALE_CONFIG_FILE}
- - name: RESOLVER
- value: ${RESOLVER}
- - name: APICAST_SERVICES
- value: ${APICAST_SERVICES}
- - name: APICAST_MISSING_CONFIGURATION
- value: ${MISSING_CONFIGURATION}
- - name: APICAST_LOG_LEVEL
- value: ${APICAST_LOG_LEVEL}
- - name: APICAST_PATH_ROUTING_ENABLED
- value: ${PATH_ROUTING}
- - name: APICAST_RESPONSE_CODES
- value: ${RESPONSE_CODES}
- - name: APICAST_REQUEST_LOGS
- value: ${REQUEST_LOGS}
- - name: APICAST_RELOAD_CONFIG
- value: ${APICAST_RELOAD_CONFIG}
- image: ${THREESCALE_GATEWAY_IMAGE}
- imagePullPolicy: Always
- name: ${THREESCALE_GATEWAY_NAME}
- livenessProbe:
- httpGet:
- path: /status/live
- port: 8090
- initialDelaySeconds: 10
- timeoutSeconds: 1
- readinessProbe:
- httpGet:
- path: /status/ready
- port: 8090
- initialDelaySeconds: 15
- timeoutSeconds: 1
- ports:
- - containerPort: 8080
- protocol: TCP
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- securityContext: {}
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
- status: {}
-- apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- ports:
- - name: 8080-tcp
- port: 8080
- protocol: TCP
- targetPort: 8080
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- sessionAffinity: None
- type: ClusterIP
- status:
- loadBalancer: {}
-parameters:
-- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
- value: threescale-portal-endpoint-secret
- name: THREESCALE_PORTAL_ENDPOINT_SECRET
- required: true
-- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
- value:
- name: THREESCALE_CONFIG_FILE
- required: false
-- description: "Name for the 3scale API Gateway"
- value: threescalegw
- name: THREESCALE_GATEWAY_NAME
- required: true
-- description: "Docker image to use."
- value: 'rhamp10/apicast-gateway:1.0.0-4'
- name: THREESCALE_GATEWAY_IMAGE
- required: true
-- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
- value:
- name: RESOLVER
- required: false
-- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
- value:
- name: APICAST_SERVICES
- required: false
-- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
- value: exit
- required: false
- name: MISSING_CONFIGURATION
-- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
- name: APICAST_LOG_LEVEL
- required: false
-- description: "Enable path routing. Experimental feature."
- name: PATH_ROUTING
- required: false
- value: "false"
-- description: "Enable traffic logging to 3scale. Includes whole request and response."
- value: "false"
- name: REQUEST_LOGS
- required: false
-- description: "Enable logging response codes to 3scale."
- value: "false"
- name: RESPONSE_CODES
- required: false
-- description: "Reload config on every request"
- value: "false"
- name: APICAST_RELOAD_CONFIG
- required: false
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast.yml b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast.yml
new file mode 100644
index 000000000..8e8051c0b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast.yml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: "${APICAST_NAME}"
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: "${CONFIGURATION_URL_SECRET}"
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: "${CONFIGURATION_FILE_PATH}"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "${DEPLOYMENT_ENVIRONMENT}"
+ - name: RESOLVER
+ value: "${RESOLVER}"
+ - name: APICAST_SERVICES
+ value: "${SERVICES_LIST}"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "${CONFIGURATION_LOADER}"
+ - name: APICAST_LOG_LEVEL
+ value: "${LOG_LEVEL}"
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: "${PATH_ROUTING}"
+ - name: APICAST_RESPONSE_CODES
+ value: "${RESPONSE_CODES}"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "${CONFIGURATION_CACHE}"
+ - name: REDIS_URL
+ value: "${REDIS_URL}"
+ - name: APICAST_MANAGEMENT_API
+ value: "${MANAGEMENT_API}"
+ - name: OPENSSL_VERIFY
+ value: "${OPENSSL_VERIFY}"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: "${APICAST_NAME}"
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: management
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - name: proxy
+ containerPort: 8080
+ protocol: TCP
+ - name: management
+ containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ ports:
+ - name: proxy
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: apicast-configuration-url-secret
+ name: CONFIGURATION_URL_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: CONFIGURATION_FILE_PATH
+ required: false
+- description: "Deployment environment. Can be sandbox or production."
+ value: production
+ name: DEPLOYMENT_ENVIRONMENT
+ required: true
+- description: "Name for the 3scale API Gateway"
+ value: apicast
+ name: APICAST_NAME
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: SERVICES_LIST
+ required: false
+- name: CONFIGURATION_LOADER
+ description: "When to load configuration. If on gateway start or incoming request. Allowed values are: lazy, boot."
+ value: boot
+ required: false
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- name: CONFIGURATION_CACHE
+ description: "For how long to cache the downloaded configuration in seconds. Can be left empty, 0 or greater than 60."
+ value: ""
+ required: false
+- description: "Redis URL. Required for OAuth2 integration. ex: redis://PASSWORD@127.0.0.1:6379/0"
+ name: REDIS_URL
+ required: false
+- name: MANAGEMENT_API
+ description: "Scope of the Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification. Can be set to true/false."
+ required: true
+ value: "false"
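
The gateway template above wires its liveness and readiness probes to /status/live and /status/ready on the management port (8090), which is why its MANAGEMENT_API description notes that at least "status" is required for health checks. A minimal sketch, assuming the management port has been made reachable on localhost:8090 (for example by port-forwarding from a running apicast pod), that performs the same two checks by hand:

    # Minimal sketch: hit the APIcast management endpoints that the probes use.
    # Assumes the management port is reachable on localhost:8090, e.g. after
    # port-forwarding from a running apicast pod.
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    for path in ('/status/live', '/status/ready'):
        url = 'http://localhost:8090' + path
        try:
            print('%s -> HTTP %d' % (path, urlopen(url, timeout=5).getcode()))
        except Exception as exc:              # refused, timed out, non-2xx, ...
            print('%s -> failed (%s)' % (path, exc))
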
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
index eb3d296be..6d987ee33 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -510,7 +513,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
index da2454d2e..fb2ef206e 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -484,7 +487,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..7ffb25e14 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -147,6 +150,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
@@ -472,7 +478,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
index 7a285dba8..d787e376b 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -147,6 +150,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
@@ -446,7 +452,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..a2070207b 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -148,7 +151,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -479,7 +482,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
index 7bee85ddd..0d33c6e0e 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -148,7 +151,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -453,7 +456,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json
index a09d71a00..af46579c8 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dotnet-example",
"annotations": {
- "openshift.io/display-name": ".NET Core",
+ "openshift.io/display-name": ".NET Core Example",
"description": "An example .NET Core application.",
"tags": "quickstart,dotnet,.net",
"iconClass": "icon-dotnet",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
index fa31f7f61..a2b59c2d3 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
@@ -19,6 +19,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -209,7 +220,12 @@
"env": [
{
"name": "ConnectionString",
- "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
}
],
"resources": {
@@ -373,7 +389,12 @@
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/httpd.json
new file mode 100644
index 000000000..ac671cc06
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/httpd.json
@@ -0,0 +1,269 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "httpd-example",
+ "annotations": {
+ "openshift.io/display-name": "Httpd",
+ "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "tags": "quickstart,httpd",
+ "iconClass": "icon-apache",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "labels": {
+ "template": "httpd-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "httpd:2.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "httpd-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "httpd-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": [
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "httpd-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/httpd-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the httpd service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ]
+}
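
For orientation, processing the template above with its defaults yields, roughly, the Route object sketched below (not part of the patch; with APPLICATION_DOMAIN left blank, the router assigns spec.host itself):

    apiVersion: v1
    kind: Route
    metadata:
      name: httpd-example
      labels:
        template: httpd-example
      annotations:
        template.openshift.io/expose-uri: "http://{.spec.host}{.spec.path}"
    spec:
      host: ""            # APPLICATION_DOMAIN defaults to "", so the router picks a host
      to:
        kind: Service
        name: httpd-example
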
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
index 264e4b2de..ce96684a9 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
index b47bdf353..34b2b920b 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..a9c365361 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
@@ -154,7 +157,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -491,7 +494,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..53a6147d5 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
@@ -154,7 +157,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -467,7 +470,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/pvc.yml b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/pvc.yml
new file mode 100644
index 000000000..0bbb8e625
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/pvc.yml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-pvc"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
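
For context, a pod consumes one of the claims above through a volume that references the claim by name plus a container mount; the container name and mount path below are illustrative, not defined by this template:

    volumes:
    - name: system-storage
      persistentVolumeClaim:
        claimName: system-storage
    containers:
    - name: system
      volumeMounts:
      - name: system-storage
        mountPath: /opt/system/storage   # example path only
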
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
index b400cfdb3..f07a43071 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -526,7 +533,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
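
Once the API server stores the Secret above, each stringData entry is folded into data as a base64-encoded value, which is what the new expose-username/expose-password JSONPath annotations point at. A hypothetical stored form (values illustrative only):

    data:
      application-user: cmFpbHM=           # base64 of a generated username
      application-password: c2VjcmV0MTIz   # base64 of a generated password
    # {.data['application-user']} then resolves to the encoded username above
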
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
index fa67412ff..a7992c988 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -500,7 +507,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/wildcard.yml b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/wildcard.yml
new file mode 100644
index 000000000..00dedecd5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/wildcard.yml
@@ -0,0 +1,158 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-apicast-wildcard-router"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-router
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-router
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-router
+ spec:
+ volumes:
+ - name: apicast-router-config
+ configMap:
+ name: apicast-router-config
+ items:
+ - key: router.conf
+ path: router.conf
+ containers:
+ - env:
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-router
+ command: ['bin/apicast']
+ livenessProbe:
+ tcpSocket:
+ port: router
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 30
+ volumeMounts:
+ - name: apicast-router-config
+ mountPath: /opt/app-root/src/sites.d/
+ readOnly: true
+ ports:
+ - containerPort: 8082
+ name: router
+ protocol: TCP
+ - containerPort: 8090
+ name: management
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-router
+ spec:
+ ports:
+ - name: router
+ port: 80
+ protocol: TCP
+ targetPort: router
+ selector:
+ deploymentconfig: apicast-router
+
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: apicast-router-config
+ data:
+ router.conf: |-
+ upstream wildcard {
+ server 0.0.0.1:1;
+
+ balancer_by_lua_block {
+ local round_robin = require 'resty.balancer.round_robin'
+ local balancer = round_robin.new()
+ local peers = balancer:peers(ngx.ctx.apicast)
+
+ local peer, err = balancer:set_peer(peers)
+
+ if not peer then
+ ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
+ ngx.log(ngx.ERR, "failed to set current backend peer: ", err)
+ ngx.exit(ngx.status)
+ end
+ }
+
+ keepalive 1024;
+ }
+
+ server {
+ listen 8082;
+ server_name ~-(?<apicast>apicast-(staging|production))\.;
+ access_log /dev/stdout combined;
+
+ location / {
+ access_by_lua_block {
+ local resolver = require('resty.resolver'):instance()
+ local servers = resolver:get_servers(ngx.var.apicast, { port = 8080 })
+
+ if #servers == 0 then
+ ngx.status = ngx.HTTP_BAD_GATEWAY
+ ngx.exit(ngx.HTTP_OK)
+ end
+
+ ngx.ctx.apicast = servers
+ }
+ proxy_http_version 1.1;
+ proxy_pass $scheme://wildcard;
+ proxy_set_header Host $host;
+ proxy_set_header Connection "";
+ }
+ }
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: apicast-wildcard-router
+ labels:
+ app: apicast-wildcard-router
+ spec:
+ host: apicast-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-router
+ port:
+ targetPort: router
+ wildcardPolicy: Subdomain
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes, e.g. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Domain name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
index 049f3f884..0bb56452b 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
@@ -12,7 +12,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-webserver30-tomcat7-openshift"
+ "name": "jboss-webserver30-tomcat7-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
@@ -23,10 +26,11 @@
"description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
- "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "supports": "tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.1"
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
}
},
{
@@ -35,10 +39,23 @@
"description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
- "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
+ "supports": "tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.3"
}
}
]
@@ -48,7 +65,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-webserver30-tomcat8-openshift"
+ "name": "jboss-webserver30-tomcat8-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
@@ -59,10 +79,11 @@
"description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
- "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "supports": "tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.1"
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
}
},
{
@@ -71,10 +92,23 @@
"description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
- "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
+ "supports": "tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.3"
}
}
]
@@ -84,7 +118,66 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-eap64-openshift"
+ "name": "jboss-webserver31-tomcat7-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Web Server 3.1 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports": "tomcat7:3.1,tomcat:7,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver31-tomcat8-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Web Server 3.1 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports": "tomcat8:3.1,tomcat:8,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap64-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
@@ -95,11 +188,12 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.1",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.1"
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
}
},
{
@@ -108,11 +202,12 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.2",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
}
},
{
@@ -121,11 +216,12 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.3",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.3",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.3"
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
}
},
{
@@ -134,11 +230,25 @@
"description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.4",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.5",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.5"
}
}
]
@@ -148,7 +258,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-eap70-openshift"
+ "name": "jboss-eap70-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
@@ -159,11 +272,12 @@
"description": "JBoss EAP 7.0 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
+ "supports": "eap:7.0,javaee:7,java:8,xpaas:1.3",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "7.0.0.GA",
- "version": "1.3"
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
}
},
{
@@ -172,11 +286,25 @@
"description": "JBoss EAP 7.0 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:7.0,javaee:7,java:8,xpaas:1.4",
+ "supports": "eap:7.0,javaee:7,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "7.0.0.GA",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.5",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.5"
}
}
]
@@ -186,7 +314,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-decisionserver62-openshift"
+ "name": "jboss-decisionserver62-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift",
@@ -196,12 +327,13 @@
"annotations": {
"description": "Red Hat JBoss BRMS 6.2 decision server S2I images.",
"iconClass": "icon-jboss",
- "tags": "builder,decisionserver,java,xpaas",
- "supports":"decisionserver:6.2,java:8,xpaas:1.2",
+ "tags": "builder,decisionserver,xpaas",
+ "supports": "decisionserver:6.2,xpaas:1.2",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.2",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
}
}
]
@@ -211,7 +343,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-decisionserver63-openshift"
+ "name": "jboss-decisionserver63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift",
@@ -221,12 +356,51 @@
"annotations": {
"description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
"iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,xpaas",
+ "supports": "decisionserver:6.3,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
+ "iconClass": "icon-jboss",
"tags": "builder,decisionserver,java,xpaas",
- "supports":"decisionserver:6.3,java:8,xpaas:1.3",
+ "supports":"decisionserver:6.3,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.3",
- "version": "1.3"
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.4 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.4,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.0"
}
}
]
@@ -236,7 +410,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-processserver63-openshift"
+ "name": "jboss-processserver63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift",
@@ -246,12 +423,26 @@
"annotations": {
"description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
"iconClass": "icon-jboss",
+ "tags": "builder,processserver,xpaas",
+ "supports": "processserver:6.3,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
"tags": "builder,processserver,java,xpaas",
- "supports":"processserver:6.3,java:8,xpaas:1.3",
+ "supports":"processserver:6.3,java:8,xpaas:1.4",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "processserver/library",
"sampleRef": "1.3",
- "version": "1.3"
+ "version": "1.4"
}
}
]
@@ -261,7 +452,35 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-datagrid65-openshift"
+ "name": "jboss-processserver64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.4 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,java,xpaas",
+ "supports":"processserver:6.4,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift",
@@ -271,9 +490,56 @@
"annotations": {
"description": "JBoss Data Grid 6.5 S2I images.",
"iconClass": "icon-jboss",
- "tags": "datagrid,java,jboss,xpaas",
- "supports":"datagrid:6.5,java:8,xpaas:1.2",
- "version": "1.2"
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:6.5,xpaas:1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:6.5,xpaas:1.4",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports":"datagrid:6.5,xpaas:1.4",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-client-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 Client Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
}
}
]
@@ -283,7 +549,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-datavirt63-openshift"
+ "name": "jboss-datavirt63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
@@ -293,9 +562,56 @@
"annotations": {
"description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
"iconClass": "icon-jboss",
- "tags": "datavirt,java,jboss,xpaas",
- "supports":"datavirt:6.3,java:8,xpaas:1.4",
- "version": "1.0"
+ "tags": "datavirt,jboss,xpaas",
+ "supports": "datavirt:6.3,xpaas:1.4",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports": "datavirt:6.3,xpaas:1.4",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports":"datavirt:6.3,xpaas:1.4",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datavirt63-driver-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP"
}
}
]
@@ -305,7 +621,10 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-amq-62"
+ "name": "jboss-amq-62",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
@@ -316,8 +635,9 @@
"description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.1",
- "version": "1.1"
+ "supports": "amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
}
},
{
@@ -326,8 +646,9 @@
"description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.2",
- "version": "1.2"
+ "supports": "amq:6.2,messaging,xpaas:1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
}
},
{
@@ -336,8 +657,45 @@
"description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.3",
- "version": "1.3"
+ "supports": "amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.4",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-63",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss A-MQ 6.3 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.3,messaging,xpaas:1.0",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
}
}
]
@@ -348,8 +706,9 @@
"apiVersion": "v1",
"metadata": {
"name": "redhat-sso70-openshift",
- "annotations": {
- "description": "Red Hat SSO 7.0"
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
}
},
"spec": {
@@ -361,8 +720,20 @@
"description": "Red Hat SSO 7.0",
"iconClass": "icon-jboss",
"tags": "sso,keycloak,redhat",
- "supports":"sso:7.0,xpaas:1.3",
- "version": "1.3"
+ "supports": "sso:7.0,xpaas:1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.0,xpaas:1.4",
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
}
}
]
@@ -372,7 +743,48 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "redhat-openjdk18-openshift"
+ "name": "redhat-sso71-openshift",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso71-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.1,xpaas:1.4",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.1,xpaas:1.4",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-openjdk18-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8"
+ }
},
"spec": {
"dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
@@ -384,11 +796,24 @@
"description": "Build and run Java applications using Maven and OpenJDK 8.",
"iconClass": "icon-jboss",
"tags": "builder,java,xpaas,openjdk",
- "supports":"java:8,xpaas:1.0",
+ "supports": "java:8,xpaas:1.0",
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
"sampleContextDir": "undertow-servlet",
"version": "1.0"
}
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports": "java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.1"
+ }
}
]
}
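
The new streams and tags above are consumed the same way the quickstart templates in this patch consume "httpd:2.4": referenced as an ImageStreamTag in the "openshift" namespace. A minimal, hypothetical build strategy:

    strategy:
      type: Source
      sourceStrategy:
        from:
          kind: ImageStreamTag
          namespace: openshift
          name: jboss-webserver31-tomcat8-openshift:1.0   # one of the tags added above
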
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json
index ab35afead..af20b373a 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json
@@ -6,46 +6,54 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral, no SSL)"
},
"name": "amq62-basic"
},
"labels": {
"template": "amq62-basic",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -53,6 +61,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,18 +69,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -171,7 +183,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire port."
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
}
}
},
@@ -202,7 +215,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
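
The service.alpha.openshift.io/dependencies annotation added above carries an escaped JSON list; decoded, it is equivalent to the following (shown only for readability):

    - name: ${APPLICATION_NAME}-amq-amqp
      kind: Service
    - name: ${APPLICATION_NAME}-amq-mqtt
      kind: Service
    - name: ${APPLICATION_NAME}-amq-stomp
      kind: Service
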
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json
index c12f06dec..5acdbfabf 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json
@@ -6,58 +6,68 @@
"description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent with SSL)"
},
"name": "amq62-persistent-ssl"
},
"labels": {
"template": "amq62-persistent-ssl",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -65,6 +75,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -72,48 +83,56 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
"required": true
},
{
+ "displayName": "Trust Store Filename",
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
"value": "broker.ts",
"required": true
},
{
+ "displayName": "Trust Store Password",
"description": "SSL trust store password",
"name": "AMQ_TRUSTSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Keystore Filename",
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
"value": "broker.ks",
"required": true
},
{
+ "displayName": "A-MQ Keystore Password",
"description": "Password for accessing SSL keystore",
"name": "AMQ_KEYSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -309,7 +328,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire (SSL) port."
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
}
}
},
@@ -340,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
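
The template message above asks the user to create an "amq-app-secret" secret carrying the broker key store and trust store before deploying; a minimal sketch of such a secret (payloads are placeholders for real keystore files):

    apiVersion: v1
    kind: Secret
    metadata:
      name: amq-app-secret
    data:
      broker.ks: <base64-encoded key store>
      broker.ts: <base64-encoded trust store>
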
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json
index 897ce0395..b8089cd6d 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json
@@ -6,58 +6,68 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent, no SSL)"
},
"name": "amq62-persistent"
},
"labels": {
"template": "amq62-persistent",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -65,6 +75,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -72,18 +83,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -183,7 +197,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire port."
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
}
}
},
@@ -214,7 +229,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
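
When MQ_USERNAME and MQ_PASSWORD are left empty, the generate/from expressions above are expanded at template-processing time; for example (output purely illustrative):

    # from: "user[a-zA-Z0-9]{3}"  ->  MQ_USERNAME such as "userX4q"
    # from: "[a-zA-Z0-9]{8}"      ->  MQ_PASSWORD such as "h7JqP2mZ"
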
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json
index 97d110286..b52fdbfb0 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json
@@ -6,46 +6,54 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral with SSL)"
},
"name": "amq62-ssl"
},
"labels": {
"template": "amq62-ssl",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -53,6 +61,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,48 +69,56 @@
"required": false
},
{
+ "displayName": "Secret Name",
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
"required": true
},
{
+ "displayName": "Trust Store Filename",
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
"value": "broker.ts",
"required": true
},
{
+ "displayName": "Trust Store Password",
"description": "SSL trust store password",
"name": "AMQ_TRUSTSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Keystore Filename",
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
"value": "broker.ks",
"required": true
},
{
+ "displayName": "A-MQ Keystore Password",
"description": "Password for accessing SSL keystore",
"name": "AMQ_KEYSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -297,7 +314,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire (SSL) port."
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
}
}
},
@@ -328,7 +346,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-basic.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-basic.json
new file mode 100644
index 000000000..d29f6a300
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-basic.json
@@ -0,0 +1,334 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral, no SSL)"
+ },
+ "name": "amq63-basic"
+ },
+ "labels": {
+ "template": "amq63-basic",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+            "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+            "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
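
The MQ_USERNAME and MQ_PASSWORD parameters above use "generate": "expression", so OpenShift fills them in from the patterns "user[a-zA-Z0-9]{3}" and "[a-zA-Z0-9]{8}" when they are left empty. A rough Python sketch of that expansion, covering only the subset of the syntax these templates use; this is an approximation, not OpenShift's own generator:

    import random
    import re
    import string

    def expand_expression(expr):
        # Expand each '[charset]{count}' group of a 'generate: expression'
        # pattern; text outside the brackets is kept verbatim. Only the
        # [a-zA-Z0-9] class used by these templates is recognised here.
        def repl(match):
            charset, count = match.group(1), int(match.group(2))
            pool = string.ascii_letters + string.digits if charset == "a-zA-Z0-9" else charset
            return "".join(random.choice(pool) for _ in range(count))
        return re.sub(r"\[([^\]]+)\]\{(\d+)\}", repl, expr)

    print(expand_expression("user[a-zA-Z0-9]{3}"))   # e.g. "userX4k"
    print(expand_expression("[a-zA-Z0-9]{8}"))       # e.g. "Zp3q9TaV"
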
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent-ssl.json
new file mode 100644
index 000000000..47f6396dd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent-ssl.json
@@ -0,0 +1,569 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent with SSL)"
+ },
+ "name": "amq63-persistent-ssl"
+ },
+ "labels": {
+ "template": "amq63-persistent-ssl",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+            "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+            "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Filename",
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Password",
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Filename",
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Password",
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ },
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
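
The AMQ_MESH_DISCOVERY_TYPE description above notes that the 'kube' agent resolves mesh peers through the Kubernetes REST API, which is why the pod's service account needs the 'view' role. A rough Python sketch of that lookup, an approximation of what the image does rather than its actual code, using the AMQ_MESH_SERVICE_NAME and AMQ_MESH_SERVICE_NAMESPACE values wired in by the template:

    import os
    import requests

    SA_DIR = "/var/run/secrets/kubernetes.io/serviceaccount"

    def mesh_peer_ips():
        # Read the pod's service-account credentials (mounted by default).
        token = open(os.path.join(SA_DIR, "token")).read().strip()
        namespace = os.environ.get("AMQ_MESH_SERVICE_NAMESPACE") or \
            open(os.path.join(SA_DIR, "namespace")).read().strip()
        service = os.environ["AMQ_MESH_SERVICE_NAME"]  # e.g. broker-amq-tcp
        # Ask the API server for the endpoints behind the mesh service.
        url = ("https://kubernetes.default.svc/api/v1/namespaces/%s/endpoints/%s"
               % (namespace, service))
        resp = requests.get(url,
                            headers={"Authorization": "Bearer " + token},
                            verify=os.path.join(SA_DIR, "ca.crt"))
        resp.raise_for_status()
        return [address["ip"]
                for subset in resp.json().get("subsets", [])
                for address in subset.get("addresses", [])]
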
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent.json
new file mode 100644
index 000000000..4b64203c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-persistent.json
@@ -0,0 +1,386 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent, no SSL)"
+ },
+ "name": "amq63-persistent"
+ },
+ "labels": {
+ "template": "amq63-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+            "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+            "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
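
These templates are normally rendered server-side (`oc process` or `oc new-app` substitute the ${...} parameters). A quick local approximation in Python for previewing what the defaults expand to, assuming the file has been saved as amq63-persistent.json; parameters generated from expressions (MQ_USERNAME, MQ_PASSWORD) come out empty in this preview:

    import json
    from string import Template

    def preview(template_path):
        # Substitute each parameter's default "value" into the ${NAME}
        # references of the template's objects. Generated parameters have
        # no default, so they stay empty here.
        with open(template_path) as f:
            tmpl = json.load(f)
        defaults = {p["name"]: p.get("value", "") for p in tmpl.get("parameters", [])}
        rendered = Template(json.dumps(tmpl["objects"])).safe_substitute(defaults)
        return json.loads(rendered)

    for obj in preview("amq63-persistent.json"):
        print(obj["kind"], obj["metadata"]["name"])
    # Service broker-amq-amqp ... DeploymentConfig broker-amq ... PersistentVolumeClaim broker-amq-claim
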
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-ssl.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-ssl.json
new file mode 100644
index 000000000..20ad50016
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq63-ssl.json
@@ -0,0 +1,521 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral with SSL)"
+ },
+ "name": "amq63-ssl"
+ },
+ "labels": {
+ "template": "amq63-ssl",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Protocols",
+            "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automatically.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+            "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+            "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Filename",
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Password",
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Filename",
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Password",
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
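
The SSL variants mount the "${AMQ_SECRET}" secret at /etc/amq-secret-volume (AMQ_KEYSTORE_TRUSTSTORE_DIR) and expect the AMQ_KEYSTORE and AMQ_TRUSTSTORE files to be inside it. A small pre-flight check sketch, purely an illustration and not something shipped with the image:

    import os
    import sys

    SECRET_DIR_DEFAULT = "/etc/amq-secret-volume"

    def check_ssl_material():
        # Verify the keystore and trust store named by the template
        # parameters are present in the mounted secret volume.
        base = os.environ.get("AMQ_KEYSTORE_TRUSTSTORE_DIR", SECRET_DIR_DEFAULT)
        wanted = (os.environ.get("AMQ_KEYSTORE", "broker.ks"),
                  os.environ.get("AMQ_TRUSTSTORE", "broker.ts"))
        missing = [name for name in wanted
                   if not os.path.isfile(os.path.join(base, name))]
        if missing:
            secret = os.environ.get("AMQ_SECRET", "amq-app-secret")
            sys.exit("missing from %s: %s (check the contents of secret '%s')"
                     % (base, ", ".join(missing), secret))

    if __name__ == "__main__":
        check_ssl_material()
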
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json
index 56e76016f..32433bef0 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json
@@ -6,76 +6,103 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 (Ephemeral, no https)"
},
"name": "datagrid65-basic"
},
"labels": {
"template": "datagrid65-basic",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+            "description": "Default cache type for all caches. If empty, 'distributed' will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -200,7 +227,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -267,9 +294,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -302,6 +334,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
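
The reworded PASSWORD description spells out a concrete policy: at least 8 characters, one letter, one digit, one non-alphanumeric symbol, different from the username, and not root, admin, or administrator. A small validator sketch that mirrors that wording; it is an illustration, not JDG's own check:

    import re

    FORBIDDEN = ("root", "admin", "administrator")

    def valid_jdg_password(password, username=""):
        # Mirrors the wording of the PASSWORD description above.
        if password.lower() in FORBIDDEN or (username and password == username):
            return False
        return (len(password) >= 8
                and re.search(r"[A-Za-z]", password) is not None
                and re.search(r"[0-9]", password) is not None
                and re.search(r"[^A-Za-z0-9]", password) is not None)

    print(valid_jdg_password("s3cret!pw", "jdguser"))  # True
    print(valid_jdg_password("password", "jdguser"))   # False
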
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json
index 639ac2e11..e6f020400 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json
@@ -6,130 +6,166 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 (Ephemeral with https)"
},
"name": "datagrid65-https"
},
"labels": {
"template": "datagrid65-https",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+            "description": "Default cache type for all caches. If empty, 'distributed' will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -301,7 +337,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -386,9 +422,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -437,6 +478,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
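
Note: the hunk above renames the existing container port on 11222 to hotrod-internal and adds a new hotrod port on 11333. A minimal sketch, assuming a local copy of one of the datagrid65 templates touched in this patch (the filename is illustrative), that prints every named container port so the split can be confirmed after editing:

    import json

    # Assumption: the template JSON has been saved locally under this name.
    with open("datagrid65-mysql.json") as fh:
        template = json.load(fh)

    # Walk all objects defensively; only DeploymentConfig-style objects
    # actually carry spec.template.spec.containers.
    for obj in template.get("objects", []):
        containers = (
            obj.get("spec", {})
            .get("template", {})
            .get("spec", {})
            .get("containers", [])
        )
        for container in containers:
            for port in container.get("ports", []):
                print(obj.get("kind"), port.get("name"), port.get("containerPort"))
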
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json
index 22ca3f0a0..ff57a7936 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and MySQL applications with persistent storage.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + MySQL (Persistent with https)"
},
"name": "datagrid65-mysql-persistent"
},
"labels": {
"template": "datagrid65-mysql-persistent",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using MySQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:/jboss/datasources/mysql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,117 +111,158 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -230,7 +286,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -254,7 +311,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +336,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Memcached service for clustered applications."
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -302,7 +361,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Hot Rod service for clustered applications."
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -397,7 +457,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -482,9 +542,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -585,6 +650,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -669,7 +742,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
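
Note: the Service objects above gain a service.alpha.openshift.io/dependencies annotation whose value is itself a JSON-encoded list, which is why the quotes are escaped inside the template. A minimal sketch, with the application name assumed to be the template default, showing how that escaped string can be produced and read back with Python's json module:

    import json

    app_name = "datagrid-app"  # assumed value of ${APPLICATION_NAME}
    deps = [{"name": "%s-mysql" % app_name, "kind": "Service"}]

    # The annotation value must be a plain string, so the list is JSON-encoded once more.
    annotations = {"service.alpha.openshift.io/dependencies": json.dumps(deps)}
    print(json.dumps({"annotations": annotations}, indent=2))

    # Reading it back requires the inverse double decode.
    decoded = json.loads(annotations["service.alpha.openshift.io/dependencies"])
    assert decoded[0]["kind"] == "Service"
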
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json
index e1a585d24..44902de25 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and MySQL applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + MySQL (Ephemeral with https)"
},
"name": "datagrid65-mysql"
},
"labels": {
"template": "datagrid65-mysql",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using MySQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:/jboss/datasources/mysql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,111 +111,151 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -224,7 +279,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -248,7 +304,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +329,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Memcached service for clustered applications."
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -296,7 +354,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Hot Rod service for clustered applications."
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -391,7 +450,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -476,9 +535,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -579,6 +643,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -663,7 +735,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
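
Note: parameters such as DB_USERNAME ("user[a-zA-Z0-9]{3}"), DB_PASSWORD and JGROUPS_CLUSTER_PASSWORD ("[a-zA-Z0-9]{8}") use "generate": "expression", so OpenShift fills them with random values matching the pattern when the template is instantiated. A rough approximation of that behaviour, limited to the simple literal-prefix-plus-character-class patterns used in these templates (not the full expression syntax):

    import random
    import re
    import string

    def generate_from_expression(expr):
        # Only handles "<literal prefix>[a-zA-Z0-9]{N}", which covers the
        # patterns above; anything else is rejected.
        match = re.fullmatch(r"([A-Za-z0-9]*)\[a-zA-Z0-9\]\{(\d+)\}", expr)
        if match is None:
            raise ValueError("unsupported expression: %s" % expr)
        prefix, length = match.group(1), int(match.group(2))
        alphabet = string.ascii_letters + string.digits
        return prefix + "".join(random.choice(alphabet) for _ in range(length))

    print(generate_from_expression("user[a-zA-Z0-9]{3}"))  # e.g. "userQ7x"
    print(generate_from_expression("[a-zA-Z0-9]{8}"))      # e.g. "k2RpZ09a"
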
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json
index 12720eb19..6b90e1370 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and PostgreSQL applications with persistent storage.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + PostgreSQL (Persistent with https)"
},
"name": "datagrid65-postgresql-persistent"
},
"labels": {
"template": "datagrid65-postgresql-persistent",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using PostgreSQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/postgresql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,102 +111,140 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -215,7 +268,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -239,7 +293,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -382,7 +437,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -467,9 +522,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -570,6 +630,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -654,7 +722,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
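
Note: replacing postgresql:latest with postgresql:${POSTGRESQL_IMAGE_STREAM_TAG} (default 9.5) pins the database image through an ordinary template parameter substitution. A minimal sketch of that ${NAME} substitution, using the template's default values:

    import re

    params = {
        "IMAGE_STREAM_NAMESPACE": "openshift",    # template default
        "POSTGRESQL_IMAGE_STREAM_TAG": "9.5",     # template default
    }

    def substitute(text, params):
        # Replace ${NAME} with its parameter value; unknown names are left intact.
        return re.sub(
            r"\$\{([A-Za-z0-9_]+)\}",
            lambda m: params.get(m.group(1), m.group(0)),
            text,
        )

    print(substitute("postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}", params))  # postgresql:9.5
    print(substitute("${IMAGE_STREAM_NAMESPACE}", params))                  # openshift
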
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json
index da8015fb0..ae36376db 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and PostgreSQL applications built using.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + PostgreSQL (Ephemeral with https)"
},
"name": "datagrid65-postgresql"
},
"labels": {
"template": "datagrid65-postgresql",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using PostgreSQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/postgresql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,96 +111,133 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configurd for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -209,7 +261,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -233,7 +286,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -376,7 +430,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -461,9 +515,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -564,6 +623,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -648,7 +715,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
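
Note: because the ImageStreamTag now references ${POSTGRESQL_IMAGE_STREAM_TAG}, the matching parameter has to be declared, as done above. A small sketch (the template filename is an assumption) that cross-checks every ${NAME} reference in a template's objects against its declared parameters:

    import json
    import re

    # Assumption: a local copy of the template, e.g. saved from this repository.
    with open("datagrid65-postgresql.json") as fh:
        template = json.load(fh)

    declared = {p["name"] for p in template.get("parameters", [])}
    referenced = set(
        re.findall(r"\$\{([A-Za-z0-9_]+)\}", json.dumps(template.get("objects", [])))
    )
    print("referenced but not declared:", sorted(referenced - declared))
    print("declared but never referenced:", sorted(declared - referenced))
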
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json
index 7d64dac98..ea2f13742 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (no SSL)"
},
"name": "datavirt63-basic-s2i"
},
@@ -60,6 +61,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -133,6 +135,27 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -215,7 +238,22 @@
"uri": "${SOURCE_REPOSITORY_URL}",
"ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir": "${CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
},
"strategy": {
"type": "Source",
@@ -224,8 +262,26 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
- }
+ "name": "jboss-datavirt63-openshift:1.2"
+ },
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ]
}
},
"output": {
@@ -252,6 +308,15 @@
"imageChange": {}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json
index 1e7c03b99..22b579ecc 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (with SSL and Extensions)"
},
"name": "datavirt63-extensions-support-s2i"
},
@@ -102,6 +103,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -238,6 +240,27 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -446,6 +469,19 @@
{
"from": {
"kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ },
+ {
+ "from": {
+ "kind": "ImageStreamTag",
"name": "${APPLICATION_NAME}-ext:latest"
},
"paths": [
@@ -464,12 +500,24 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
+ "name": "jboss-datavirt63-openshift:1.2"
},
"env": [
{
"name": "CUSTOM_INSTALL_DIRECTORIES",
"value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
]
}
@@ -507,6 +555,15 @@
}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
@@ -713,7 +770,7 @@
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE",
- "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ "value": "${HTTPS_KEYSTORE}"
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json
index 07f926ff3..9392c20a6 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (with SSL)"
},
"name": "datavirt63-secure-s2i"
},
@@ -74,6 +75,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -210,6 +212,168 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "displayName": "SSO Server URL",
+ "name": "SSO_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "SSO Server Service URL",
+ "name": "SSO_SERVICE_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "displayName": "SSO Realm",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "displayName": "SSO Username",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "displayName": "SSO User's Password",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Realm Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
+ "displayName": "SSO Realm Public Key",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type. true or false",
+ "displayName": "SSO Bearer Only",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "displayName": "SSO SAML Keystore Secret",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "displayName": "SSO SAML Keystore File",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "displayName": "SSO SAML Certificate Alias",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "displayName": "SSO SAML Keystore Password",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "displayName": "SSO Client Secret",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Enable CORS for SSO applications. true or false",
+ "name": "SSO_ENABLE_CORS",
+ "displayName": "SSO Enable CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "displayName": "SSO SAML Logout Page",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "displayName": "SSO Disable SSL Certificate Validation",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "displayName": "SSO Truststore File",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "displayName": "SSO Truststore Password",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "displayName": "SSO Truststore Secret",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of deployments that shoulds be exploded and enabled for SSO OpenIDConnect via auth-method",
+ "name": "SSO_OPENIDCONNECT_DEPLOYMENTS",
+ "displayName": "SSO OpenIDConnect Deployments",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of deployments that shoulds be exploded and enabled for SSO SAML via auth-method",
+ "name": "SSO_SAML_DEPLOYMENTS",
+ "displayName": "SSO SAML Deployments",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -354,7 +518,22 @@
"uri": "${SOURCE_REPOSITORY_URL}",
"ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir": "${CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
},
"strategy": {
"type": "Source",
@@ -363,8 +542,26 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
- }
+ "name": "jboss-datavirt63-openshift:1.2"
+ },
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ]
}
},
"output": {
@@ -391,6 +588,15 @@
"imageChange": {}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
@@ -597,7 +803,7 @@
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE",
- "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ "value": "${HTTPS_KEYSTORE}"
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
@@ -610,6 +816,98 @@
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
"value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_OPENIDCONNECT_DEPLOYMENTS",
+ "value": "${SSO_OPENIDCONNECT_DEPLOYMENTS}"
+ },
+ {
+ "name": "SSO_SAML_DEPLOYMENTS",
+ "value": "${SSO_SAML_DEPLOYMENTS}"
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
}
]
}
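
The hunks above add the SSO_* template parameters and pass each one straight through to the container as an environment variable of the same name via "${PARAMETER_NAME}" substitution. A minimal sketch of that wiring, reduced to a single parameter (the template name, pod, and image reference here are placeholders for illustration and are not part of the real datavirt63 template):

    {
      "kind": "Template",
      "apiVersion": "v1",
      "metadata": { "name": "sso-env-sketch" },
      "parameters": [
        { "displayName": "SSO URL", "description": "The URL for the SSO server", "name": "SSO_URL", "value": "", "required": false }
      ],
      "objects": [
        {
          "kind": "Pod",
          "apiVersion": "v1",
          "metadata": { "name": "sso-env-demo" },
          "spec": {
            "containers": [
              {
                "name": "demo",
                "image": "example/datavirt-demo:latest",
                "env": [
                  { "name": "SSO_URL", "value": "${SSO_URL}" }
                ]
              }
            ]
          }
        }
      ]
    }

Processing the template with something like "oc process -f template.json -p SSO_URL=https://sso.example.com:8443/auth | oc create -f -" resolves the "${SSO_URL}" reference before the objects are created; parameters left at their empty defaults simply yield empty environment variables, which the image launch scripts generally treat as "not configured".
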
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json
index 754a3b4c0..1989036fa 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server + A-MQ (with https)"
},
"name": "decisionserver62-amq-s2i"
},
@@ -14,20 +15,24 @@
"template": "decisionserver62-amq-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,102 +40,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -138,6 +160,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,18 +168,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +198,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -198,7 +226,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -222,7 +251,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -564,7 +594,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
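
Beyond the displayName and message additions, the functional changes in this file are the jboss-amq-62 image tag bump to 1.4 and the service.alpha.openshift.io/dependencies annotation on the two web Services. The annotation value must itself be a JSON array encoded as a string (hence the escaped quotes in the template); the OpenShift web console reads it to group the application's HTTP/HTTPS services with their backing A-MQ service in the project overview. Using the template's default APPLICATION_NAME of kie-app, the processed HTTP Service comes out roughly like this (a sketch trimmed to the relevant fields):

    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "kie-app",
        "labels": { "application": "kie-app" },
        "annotations": {
          "description": "The web server's HTTP port.",
          "service.alpha.openshift.io/dependencies": "[{\"name\": \"kie-app-amq-tcp\", \"kind\": \"Service\"}]"
        }
      },
      "spec": {
        "ports": [ { "port": 8080, "targetPort": 8080 } ],
        "selector": { "deploymentConfig": "kie-app" }
      }
    }
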
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json
index 8be4ac90b..25b2c162c 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server (no https)"
},
"name": "decisionserver62-basic-s2i"
},
@@ -14,20 +15,24 @@
"template": "decisionserver62-basic-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,48 +40,56 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -84,6 +97,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -91,6 +105,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -98,6 +113,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json
index bf9047599..85605d642 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server HTTPS applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server (with https)"
},
"name": "decisionserver62-https-s2i"
},
@@ -14,32 +15,38 @@
"template": "decisionserver62-https-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,78 +54,91 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -126,6 +146,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +154,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +162,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json
index 51e667e02..ecea54d94 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server + A-MQ (with https)"
},
"name": "decisionserver63-amq-s2i"
},
"labels": {
"template": "decisionserver63-amq-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,108 +40,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -144,6 +167,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -151,18 +175,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,10 +205,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -204,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -228,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -334,13 +378,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
@@ -574,7 +626,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
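
In the decisionserver63 templates the new MAVEN_MIRROR_URL and ARTIFACT_DIR parameters are forwarded into the S2I build as environment variables on the Source strategy, so builds in disconnected environments can point the builder image at an internal Maven repository and narrow which archives are copied into the deployment folder. As a standalone sketch (parameter references left unresolved, exactly as they sit inside the template), the resulting BuildConfig looks approximately like:

    {
      "kind": "BuildConfig",
      "apiVersion": "v1",
      "metadata": { "name": "kie-app" },
      "spec": {
        "source": {
          "type": "Git",
          "git": { "uri": "https://github.com/jboss-openshift/openshift-quickstarts.git", "ref": "1.3" },
          "contextDir": "decisionserver/hellorules"
        },
        "strategy": {
          "type": "Source",
          "sourceStrategy": {
            "env": [
              { "name": "KIE_CONTAINER_DEPLOYMENT", "value": "${KIE_CONTAINER_DEPLOYMENT}" },
              { "name": "MAVEN_MIRROR_URL", "value": "${MAVEN_MIRROR_URL}" },
              { "name": "ARTIFACT_DIR", "value": "${ARTIFACT_DIR}" }
            ],
            "forcePull": true,
            "from": { "kind": "ImageStreamTag", "namespace": "openshift", "name": "jboss-decisionserver63-openshift:1.4" }
          }
        },
        "output": { "to": { "kind": "ImageStreamTag", "name": "kie-app:latest" } }
      }
    }
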
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json
index c5f0d006a..d655dbe94 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server (no https)"
},
"name": "decisionserver63-basic-s2i"
},
"labels": {
"template": "decisionserver63-basic-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,48 +40,56 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -84,6 +97,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -91,6 +105,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -98,10 +113,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -184,13 +213,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json
index 3db0e4c84..78e79c0cf 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server HTTPS applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server (with https)"
},
"name": "decisionserver63-https-s2i"
},
"labels": {
"template": "decisionserver63-https-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,78 +54,91 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -126,6 +146,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +154,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -140,10 +162,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -273,13 +309,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-amq-s2i.json
new file mode 100644
index 000000000..c688a2a67
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-amq-s2i.json
@@ -0,0 +1,748 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server + A-MQ (with https)"
+ },
+ "name": "decisionserver64-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-amq-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-basic-s2i.json
new file mode 100644
index 000000000..778c51844
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-basic-s2i.json
@@ -0,0 +1,376 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server (no https)"
+ },
+ "name": "decisionserver64-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
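The basic (no-https) template just added is consumed like any other OpenShift template: register it in a project, then instantiate it with parameter overrides. A minimal usage sketch with the standard oc client, assuming the cluster already has the JBoss xPaaS image streams in the openshift namespace (the default IMAGE_STREAM_NAMESPACE above); the oc invocations themselves are illustrative, and every -p value shown is simply the template's own default made explicit:

# Register the template in the current project
oc create -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-basic-s2i.json

# Instantiate it; parameters not overridden here fall back to the defaults declared above
oc new-app --template=decisionserver64-basic-s2i \
  -p APPLICATION_NAME=kie-app \
  -p SOURCE_REPOSITORY_URL=https://github.com/jboss-openshift/openshift-quickstarts.git \
  -p SOURCE_REPOSITORY_REF=1.3 \
  -p CONTEXT_DIR=decisionserver/hellorules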
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-https-s2i.json
new file mode 100644
index 000000000..e6c6961c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-https-s2i.json
@@ -0,0 +1,517 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server (with https)"
+ },
+ "name": "decisionserver64-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
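The https variant expects the objects named in its "message" field to exist before the DeploymentConfig can roll out: the decisionserver-service-account service account and a secret (HTTPS_SECRET, default decisionserver-app-secret) holding the keystore that gets mounted at /etc/decisionserver-secret-volume. A hedged sketch of that preparation, assuming a self-signed keystore is acceptable for testing; the keytool command and the CN value are illustrative, while the file name, alias and password match the template defaults (keystore.jks, jboss, mykeystorepass):

# Self-signed keystore matching the template defaults (HTTPS_KEYSTORE, HTTPS_NAME, HTTPS_PASSWORD)
keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks \
  -storepass mykeystorepass -keypass mykeystorepass -dname "CN=kie-app.example.com"

# Service account and secret referenced by the template's message
oc create serviceaccount decisionserver-service-account
oc create secret generic decisionserver-app-secret --from-file=keystore.jks
oc secrets link decisionserver-service-account decisionserver-app-secret

# Render and create the objects; unset parameters keep the defaults shown above
oc process -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver64-https-s2i.json \
  -p APPLICATION_NAME=kie-app | oc create -f -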
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json
index 72dbb4302..912838175 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -5,131 +5,153 @@
"annotations": {
"description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + A-MQ (Persistent with https)"
},
"name": "eap64-amq-persistent-s2i"
},
"labels": {
"template": "eap64-amq-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -137,6 +159,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,18 +167,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +189,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,36 +197,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -207,10 +240,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -234,7 +281,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -258,7 +306,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,11 +409,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -641,7 +700,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json
index 9dd847451..dd4c7a27b 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json
@@ -5,119 +5,139 @@
"annotations": {
"description": "Application template for EAP 6 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + A-MQ (Ephemeral with https)"
},
"name": "eap64-amq-s2i"
},
"labels": {
"template": "eap64-amq-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +145,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,18 +153,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -151,6 +175,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -158,36 +183,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,10 +226,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -222,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -246,7 +292,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -348,11 +395,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -626,7 +683,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json
index 7b1800b7b..e13b3851b 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json
@@ -6,58 +6,68 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (no https)"
},
"name": "eap64-basic-s2i"
},
"labels": {
"template": "eap64-basic-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application has been created in your project.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -65,6 +75,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -72,6 +83,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -79,12 +91,14 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -92,10 +106,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -174,11 +202,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json
index 31716d84c..0da32eb40 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json
@@ -6,100 +6,117 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (with https)"
},
"name": "eap64-https-s2i"
},
"labels": {
"template": "eap64-https-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,10 +183,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -287,11 +326,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json
index 212431056..77b75466d 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -5,149 +5,175 @@
"annotations": {
"description": "Application template for EAP 6 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MongoDB (Persistent with https)"
},
"name": "eap64-mongodb-persistent-s2i"
},
"labels": {
"template": "eap64-mongodb-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -176,6 +205,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -183,6 +213,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -190,36 +221,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -227,10 +264,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -254,7 +312,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +337,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -380,11 +440,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -674,7 +744,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
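The database-backed variants also annotate the web Services with service.alpha.openshift.io/dependencies, whose value is a JSON-encoded list of {name, kind} references pointing at the companion database Service; the web console can use this to surface the database alongside the application. A hedged YAML sketch of such a Service is below; only the annotation itself comes from the diff, while the eap-app name, port, and selector are assumed defaults for illustration:

    apiVersion: v1
    kind: Service
    metadata:
      name: eap-app                      # ${APPLICATION_NAME} resolved with its default
      labels:
        application: eap-app
      annotations:
        description: The web server's http port.
        # JSON-encoded list naming the MongoDB Service this web Service depends on.
        service.alpha.openshift.io/dependencies: '[{"name": "eap-app-mongodb", "kind": "Service"}]'
    spec:
      ports:
      - port: 8080                       # assumed HTTP port, not shown in this hunk
        targetPort: 8080
      selector:
        deploymentConfig: eap-app        # assumed selector, not shown in this hunk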
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json
index 13fbbdd93..2785782d4 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json
@@ -5,143 +5,168 @@
"annotations": {
"description": "Application template for EAP 6 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MongoDB (Ephemeral with https)"
},
"name": "eap64-mongodb-s2i"
},
"labels": {
"template": "eap64-mongodb-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +198,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,6 +206,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -184,36 +214,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -221,10 +257,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -248,7 +305,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +330,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -374,11 +433,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -668,7 +737,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json
index 69fdec206..cca0f9c2b 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -5,159 +5,187 @@
"annotations": {
"description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MySQL (Persistent with https)"
},
"name": "eap64-mysql-persistent-s2i"
},
"labels": {
"template": "eap64-mysql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +193,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -172,6 +201,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -179,6 +209,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -186,6 +217,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -193,36 +225,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -230,10 +268,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -257,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -281,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -383,11 +444,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -681,7 +752,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
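A second recurring change pins the database image to a template parameter instead of the floating :latest tag, so the database only moves to a new release when the tag value is changed, for example at processing time with oc process -p MYSQL_IMAGE_STREAM_TAG=5.6. A minimal sketch of the pattern as it appears in the MySQL templates, in YAML form with the surrounding DeploymentConfig omitted:

    # Required parameter with a pinned default tag...
    parameters:
    - displayName: MySQL Image Stream Tag
      description: The tag to use for the "mysql" image stream.
      name: MYSQL_IMAGE_STREAM_TAG
      value: "5.7"
      required: true
    # ...referenced from the MySQL deployment's ImageChange trigger instead of mysql:latest:
    from:
      kind: ImageStreamTag
      namespace: ${IMAGE_STREAM_NAMESPACE}
      name: mysql:${MYSQL_IMAGE_STREAM_TAG}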
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json
index 2bd3c249f..5766506fd 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json
@@ -5,153 +5,180 @@
"annotations": {
"description": "Application template for EAP 6 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MySQL (Ephemeral with https)"
},
"name": "eap64-mysql-s2i"
},
"labels": {
"template": "eap64-mysql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +186,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -166,6 +194,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,6 +202,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -180,6 +210,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -187,36 +218,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -377,11 +437,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -675,7 +745,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 31f245950..01891774d 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -5,144 +5,169 @@
"annotations": {
"description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + PostgreSQL (Persistent with https)"
},
"name": "eap64-postgresql-persistent-s2i"
},
"labels": {
"template": "eap64-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -150,6 +175,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -157,6 +183,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +191,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +199,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -178,36 +207,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -215,10 +250,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -242,7 +298,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -266,7 +323,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -368,11 +426,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -666,7 +734,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json
index eac964697..e00f2b0e3 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json
@@ -5,138 +5,162 @@
"annotations": {
"description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + PostgreSQL (Ephemeral with https)"
},
"name": "eap64-postgresql-s2i"
},
"labels": {
"template": "eap64-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,6 +168,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -151,6 +176,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,6 +184,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +192,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -172,36 +200,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -362,11 +419,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -660,7 +727,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
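The service.alpha.openshift.io/dependencies annotation added to the two services above carries a JSON array encoded as a string. Decoded, and assuming the usual eap-app default for APPLICATION_NAME in these templates, it has the following shape (shown only to illustrate the format; it is not part of the patch itself):

[
    {
        "name": "eap-app-postgresql",
        "kind": "Service"
    }
]
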
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json
index 09023be71..ec0739d04 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json
@@ -3,103 +3,120 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
+ "iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
- "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + Single Sign-On (with https)"
},
"name": "eap64-sso-s2i"
},
"labels": {
"template": "eap64-sso-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTP",
"value": "",
"required": true
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": true
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.x-ose",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,84 +183,98 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
},
{
+ "displayName": "URL for SSO",
"description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
"name": "SSO_URL",
"value": "",
"required": true
},
{
- "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
"name": "SSO_SERVICE_URL",
"value": "https://secure-sso:8443/auth",
"required": false
},
{
+ "displayName": "SSO Realm",
"description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": true
},
{
+ "displayName": "SSO Username",
"description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
"name": "SSO_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Password",
"description": "The password for the SSO service user.",
"name": "SSO_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Public Key",
"description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
"name": "SSO_PUBLIC_KEY",
"value": "",
"required": false
},
{
+ "displayName": "SSO Bearer Only?",
"description": "SSO Client Access Type",
"name": "SSO_BEARER_ONLY",
"value": "",
"required": false
},
{
+ "displayName": "Artifact Directories",
"description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
"name": "ARTIFACT_DIR",
"value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Secret",
"description": "The name of the secret containing the keystore file",
"name": "SSO_SAML_KEYSTORE_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "SSO SAML Keystore",
"description": "The name of the keystore file within the secret",
"name": "SSO_SAML_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "SSO SAML Certificate Name",
"description": "The name associated with the server certificate",
"name": "SSO_SAML_CERTIFICATE_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Password",
"description": "The password for the keystore and certificate",
"name": "SSO_SAML_KEYSTORE_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "SSO Client Secret",
"description": "The SSO Client Secret for Confidential Access",
"name": "SSO_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -243,42 +282,55 @@
"required": true
},
{
+ "displayName": "Enable CORS for SSO?",
"description": "Enable CORS for SSO applications",
"name": "SSO_ENABLE_CORS",
"value": "false",
"required": false
},
{
+ "displayName": "SSO SAML Logout Page",
"description": "SSO logout page for SAML applications",
"name": "SSO_SAML_LOGOUT_PAGE",
"value": "/",
"required": false
},
{
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
"description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
"name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
"value": "true",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "eap-app-secret",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
}
- ],
+ ],
"objects": [
{
"kind": "Service",
@@ -406,7 +458,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
},
"env": [
{
@@ -416,6 +468,10 @@
{
"name": "MAVEN_ARGS_APPEND",
"value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
}
]
}
@@ -615,7 +671,7 @@
"name": "HORNETQ_TOPICS",
"value": "${HORNETQ_TOPICS}"
},
- {
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-third-party-db-s2i.json
new file mode 100644
index 000000000..e8f6d6585
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-third-party-db-s2i.json
@@ -0,0 +1,646 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (with https, supporting third-party DB drivers)"
+ },
+ "name": "eap64-third-party-db-s2i"
+ },
+ "labels": {
+ "template": "eap64-third-party-db-s2i",
+ "xpaas": "1.4.0"
+ },
+  "message": "A new EAP 6 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
+ "name": "CONFIGURATION_NAME",
+ "value": "eap-app-config",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within HornetQ subsystem.",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within HornetQ subsystem.",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Cluster Admin Password",
+ "description": "Admin password for HornetQ cluster.",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/eap-environment",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
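The new eap64-third-party-db-s2i template above mounts the secret named by CONFIGURATION_NAME (default eap-app-config) at /etc/eap-environment and sets ENV_FILES to read everything in that directory, so the datasource settings must be supplied out of band, as the template message notes. A minimal sketch of the service account and configuration secret it expects, using the template defaults; the secret key and its contents are placeholders only, and the HTTPS_SECRET / JGROUPS_ENCRYPT_SECRET keystore secrets are omitted:

{
    "kind": "List",
    "apiVersion": "v1",
    "items": [
        {
            "kind": "ServiceAccount",
            "apiVersion": "v1",
            "metadata": {
                "name": "eap-service-account"
            }
        },
        {
            "kind": "Secret",
            "apiVersion": "v1",
            "metadata": {
                "name": "eap-app-config"
            },
            "stringData": {
                "datasources.env": "<datasource environment variables consumed via ENV_FILES>"
            }
        }
    ]
}
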
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json
index f08cdf2f9..3f0eba6e3 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -5,131 +5,153 @@
"annotations": {
"description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + A-MQ (Persistent with https)"
},
"name": "eap70-amq-persistent-s2i"
},
"labels": {
"template": "eap70-amq-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -137,6 +159,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,18 +167,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +189,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,36 +197,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -207,10 +240,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -234,7 +281,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -258,7 +306,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,11 +409,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -641,7 +700,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json
index 3ca9e9fab..f2d65f353 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json
@@ -5,119 +5,139 @@
"annotations": {
"description": "Application template for EAP 7 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + A-MQ (with https)"
},
"name": "eap70-amq-s2i"
},
"labels": {
"template": "eap70-amq-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +145,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,18 +153,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -151,6 +175,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -158,36 +183,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,10 +226,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -222,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -246,7 +292,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -348,11 +395,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -626,7 +683,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json
index 83b4d5b24..c33e3f7cb 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json
@@ -6,58 +6,68 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (no https)"
},
"name": "eap70-basic-s2i"
},
"labels": {
"template": "eap70-basic-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application has been created in your project.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.0.GA",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -65,6 +75,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -72,6 +83,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -79,12 +91,14 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -92,10 +106,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -174,11 +202,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json
index 1292442a4..7542d31c8 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json
@@ -6,100 +6,117 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (with https)"
},
"name": "eap70-https-s2i"
},
"labels": {
"template": "eap70-https-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.0.GA",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,10 +183,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -287,11 +326,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json
index 99db77d58..8a7da66c1 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -5,149 +5,175 @@
"annotations": {
"description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MongoDB (Persistent with https)"
},
"name": "eap70-mongodb-persistent-s2i"
},
"labels": {
"template": "eap70-mongodb-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -176,6 +205,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -183,6 +213,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -190,36 +221,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -227,10 +264,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -254,7 +312,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +337,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -380,11 +440,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -685,7 +755,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json
index c8150c231..ae52a3deb 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json
@@ -5,143 +5,168 @@
"annotations": {
"description": "Application template for EAP 7 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MongoDB (Ephemeral with https)"
},
"name": "eap70-mongodb-s2i"
},
"labels": {
"template": "eap70-mongodb-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +198,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,6 +206,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -184,36 +214,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -221,10 +257,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -248,7 +305,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +330,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -374,11 +433,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -679,7 +748,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
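
Both MongoDB variants above gain a service.alpha.openshift.io/dependencies annotation on the web services, whose value is a JSON array serialized into a string, and pin the database trigger to mongodb:${MONGODB_IMAGE_STREAM_TAG} (default 3.2) instead of mongodb:latest. A short illustration of what the escaped annotation decodes to, using the template's default APPLICATION_NAME of "eap-app" (a filled-in example, not output captured from a cluster):

    import json

    # Annotation value after substituting ${APPLICATION_NAME} with the default "eap-app".
    raw = '[{"name": "eap-app-mongodb", "kind": "Service"}]'
    deps = json.loads(raw)
    print(deps[0]["kind"], deps[0]["name"])  # Service eap-app-mongodb
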
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json
index f8e5c2b04..a0a3d7717 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -5,159 +5,187 @@
"annotations": {
"description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MySQL (Persistent with https)"
},
"name": "eap70-mysql-persistent-s2i"
},
"labels": {
"template": "eap70-mysql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +193,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -172,6 +201,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -179,6 +209,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -186,6 +217,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -193,36 +225,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -230,10 +268,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -257,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -281,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -383,11 +444,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -696,7 +767,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json
index 1edeb62e7..8255ade5d 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json
@@ -5,153 +5,180 @@
"annotations": {
"description": "Application template for EAP 7 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MySQL (Ephemeral with https)"
},
"name": "eap70-mysql-s2i"
},
"labels": {
"template": "eap70-mysql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +186,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -166,6 +194,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,6 +202,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -180,6 +210,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -187,36 +218,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -377,11 +437,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -690,7 +760,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
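
The MySQL variants follow the same pattern: a required MYSQL_IMAGE_STREAM_TAG parameter (default 5.7) replaces the mysql:latest ImageStreamTag reference, so the database version becomes an explicit parameter choice rather than whatever "latest" happens to point at; the PostgreSQL templates below do the same with POSTGRESQL_IMAGE_STREAM_TAG (default 9.5). A rough stand-in for the substitution performed when the template is processed (simplified; the real expansion happens server-side when the template is instantiated):

    # Template default shown in the hunks above.
    params = {"MYSQL_IMAGE_STREAM_TAG": "5.7"}

    ref = "mysql:${MYSQL_IMAGE_STREAM_TAG}"
    for name, value in params.items():
        ref = ref.replace("${%s}" % name, value)

    print(ref)  # mysql:5.7 -- the tag the deployment trigger now tracks
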
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json
index d11df06ee..436c541d8 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -5,144 +5,169 @@
"annotations": {
"description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + PostgreSQL (Persistent with https)"
},
"name": "eap70-postgresql-persistent-s2i"
},
"labels": {
"template": "eap70-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -150,6 +175,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -157,6 +183,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +191,7 @@
"required": true
},
{
+ "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +199,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -178,36 +207,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -215,10 +250,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -242,7 +298,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -266,7 +323,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -368,11 +426,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -681,7 +749,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json
index 6b7f6d707..a2a37a886 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json
@@ -5,138 +5,162 @@
"annotations": {
"description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + PostgreSQL (Ephemeral with https)"
},
"name": "eap70-postgresql-s2i"
},
"labels": {
"template": "eap70-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,6 +168,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -151,6 +176,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,6 +184,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +192,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -172,36 +200,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -362,11 +419,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -675,7 +742,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json
index 811602220..08a844cd9 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json
@@ -3,103 +3,120 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
+ "iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
- "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + Single Sign-On (with https)"
},
"name": "eap70-sso-s2i"
},
"labels": {
"template": "eap70-sso-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTP",
"value": "",
"required": true
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": true
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.x-ose",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,84 +183,98 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
},
{
+ "displayName": "URL for SSO",
"description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
"name": "SSO_URL",
"value": "",
"required": true
},
{
- "description": "The URL for the interal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
"name": "SSO_SERVICE_URL",
"value": "https://secure-sso:8443/auth",
"required": false
},
{
+ "displayName": "SSO Realm",
"description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": true
},
{
+ "displayName": "SSO Username",
"description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
"name": "SSO_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Password",
"description": "The password for the SSO service user.",
"name": "SSO_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Public Key",
"description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability",
"name": "SSO_PUBLIC_KEY",
"value": "",
"required": false
},
{
+ "displayName": "SSO Bearer Only?",
"description": "SSO Client Access Type",
"name": "SSO_BEARER_ONLY",
"value": "",
"required": false
},
{
+ "displayName": "Artifact Directories",
"description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
"name": "ARTIFACT_DIR",
"value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Secret",
"description": "The name of the secret containing the keystore file",
"name": "SSO_SAML_KEYSTORE_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "SSO SAML Keystore",
"description": "The name of the keystore file within the secret",
"name": "SSO_SAML_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "SSO SAML Certificate Name",
"description": "The name associated with the server certificate",
"name": "SSO_SAML_CERTIFICATE_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Password",
"description": "The password for the keystore and certificate",
"name": "SSO_SAML_KEYSTORE_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "SSO Client Secret",
"description": "The SSO Client Secret for Confidential Access",
"name": "SSO_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -243,42 +282,55 @@
"required": true
},
{
+ "displayName": "Enable CORS for SSO?",
"description": "Enable CORS for SSO applications",
"name": "SSO_ENABLE_CORS",
"value": "false",
"required": false
},
{
+ "displayName": "SSO SAML Logout Page",
"description": "SSO logout page for SAML applications",
"name": "SSO_SAML_LOGOUT_PAGE",
"value": "/",
"required": false
},
{
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
"description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
"name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
"value": "true",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "eap7-app-secret",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
}
- ],
+ ],
"objects": [
{
"kind": "Service",
@@ -406,7 +458,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
},
"env": [
{
@@ -416,6 +468,10 @@
{
"name": "MAVEN_ARGS_APPEND",
"value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
}
]
}
@@ -593,7 +649,7 @@
{
"name": "HOSTNAME_HTTPS",
"value": "${HOSTNAME_HTTPS}"
- },
+ },
{
"name": "HTTPS_KEYSTORE_DIR",
"value": "/etc/eap-secret-volume"
@@ -626,7 +682,7 @@
"name": "HORNETQ_TOPICS",
"value": "${HORNETQ_TOPICS}"
},
- {
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-third-party-db-s2i.json
new file mode 100644
index 000000000..9e854d7ab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-third-party-db-s2i.json
@@ -0,0 +1,657 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (with https, supporting third-party DB drivers)"
+ },
+ "name": "eap70-third-party-db-s2i"
+ },
+ "labels": {
+ "template": "eap70-third-party-db-s2i",
+ "xpaas": "1.4.0"
+ },
+  "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
+ "name": "CONFIGURATION_NAME",
+ "value": "eap-app-config",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within Messaging subsystem.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within Messaging subsystem.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+      "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Messaging Cluster Admin Password",
+ "description": "Admin password for Messaging cluster.",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/eap-environment",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 413a6de87..4e42e0eca 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -6,46 +6,54 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat7,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 (no https)"
},
"name": "jws30-tomcat7-basic-s2i"
},
"labels": {
"template": "jws30-tomcat7-basic-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -53,6 +61,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,6 +69,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -67,6 +77,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -74,10 +85,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -156,11 +181,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -248,7 +283,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json
index 610ea9441..f5fc2e581 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -6,76 +6,89 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat7,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 (with https)"
},
"name": "jws30-tomcat7-https-s2i"
},
"labels": {
"template": "jws30-tomcat7-https-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -83,6 +96,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -233,11 +263,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -326,7 +366,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 6ef9d6e4c..2a73a182c 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -5,125 +5,147 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MongoDB (Persistent with https)"
},
"name": "jws30-tomcat7-mongodb-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-mongodb-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -131,6 +153,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -138,6 +161,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,6 +169,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -152,6 +177,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +185,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -166,6 +193,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -173,10 +201,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -200,7 +249,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -224,7 +274,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -326,11 +377,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -419,7 +480,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -547,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index 9b48f8ae7..a71dfa634 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -5,119 +5,140 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications built using S2I.",
- "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MongoDB (Ephemeral with https)"
},
"name": "jws30-tomcat7-mongodb-s2i"
},
"labels": {
"template": "jws30-tomcat7-mongodb-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +146,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,6 +154,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -139,6 +162,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -146,6 +170,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -153,6 +178,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -160,6 +186,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -167,10 +194,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -194,7 +242,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -218,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -320,11 +370,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -413,7 +473,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -541,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index 30af703ce..9a05dcbd5 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -5,135 +5,159 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MySQL (Persistent with https)"
},
"name": "jws30-tomcat7-mysql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-mysql-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -141,6 +165,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +173,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -176,10 +205,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -203,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -227,7 +278,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -329,11 +381,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -422,7 +484,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -546,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
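
The same pattern recurs for MySQL: mysql:latest becomes mysql:${MYSQL_IMAGE_STREAM_TAG} with a 5.7 default, so the database version is explicit and survives image-stream updates. A quick, hypothetical checker (an assumption for illustration, not repository tooling) for catching any remaining :latest ImageStreamTag references could look like this:

import json
import sys

# Walk a template and flag every ImageStreamTag reference that still points at
# ":latest" after the tags were parameterized as in the hunks above.
def find_latest_tags(path):
    with open(path) as f:
        template = json.load(f)
    hits = []

    def walk(node, trail):
        if isinstance(node, dict):
            if node.get("kind") == "ImageStreamTag" and str(node.get("name", "")).endswith(":latest"):
                hits.append((trail, node["name"]))
            for key, value in node.items():
                walk(value, trail + [key])
        elif isinstance(node, list):
            for i, item in enumerate(node):
                walk(item, trail + [str(i)])

    walk(template, [])
    return hits

if __name__ == "__main__":
    for path in sys.argv[1:]:
        for trail, name in find_latest_tags(path):
            print(f"{path}: {'/'.join(trail)} -> {name}")
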
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index c2843af63..553a30a44 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -5,129 +5,152 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications built using S2I.",
- "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MySQL (Ephemeral with https)"
},
"name": "jws30-tomcat7-mysql-s2i"
},
"labels": {
"template": "jws30-tomcat7-mysql-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -135,6 +158,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -142,6 +166,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,10 +198,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -197,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -221,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -323,11 +374,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -416,7 +477,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -540,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
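
Several of the credential parameters above (DB_USERNAME, DB_PASSWORD, the webhook secrets) carry a "from" pattern such as [a-zA-Z0-9]{8} and are generated when the template is processed rather than typed by the user. The sketch below loosely imitates that generation for the simple character-class{n} shapes used here; generate_from_pattern is an illustrative name and the real generator supports a richer expression syntax than this.

import random
import re
import string

# Loose illustration (simplified assumption) of how expression-generated
# parameters get their values when "from" is e.g. "user[a-zA-Z0-9]{3}".
def generate_from_pattern(pattern):
    match = re.fullmatch(r"(?:(?P<prefix>\w+))?\[a-zA-Z0-9\]\{(?P<count>\d+)\}", pattern)
    if not match:
        raise ValueError(f"unsupported pattern for this sketch: {pattern}")
    prefix = match.group("prefix") or ""
    count = int(match.group("count"))
    alphabet = string.ascii_letters + string.digits
    return prefix + "".join(random.choice(alphabet) for _ in range(count))

print(generate_from_pattern("user[a-zA-Z0-9]{3}"))   # e.g. userX7q
print(generate_from_pattern("[a-zA-Z0-9]{8}"))       # e.g. h2K9sLqZ
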
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index b8372f374..a5c6c8a56 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -5,120 +5,141 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + PostgreSQL (Persistent with https)"
},
"name": "jws30-tomcat7-postgresql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -126,6 +147,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +155,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +163,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -147,6 +171,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -154,6 +179,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -161,10 +187,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -188,7 +235,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -212,7 +260,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -314,11 +363,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -407,7 +466,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -531,7 +590,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
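
Each template also gains a service.alpha.openshift.io/dependencies annotation that links the web service to its database service in the console overview. The annotation value is a JSON array serialized into a string, which is easy to mis-escape by hand; the small sketch below (the helper name and the jws-app example are assumptions) shows how the exact value added above can be produced with json.dumps.

import json

# Build the dependency annotation programmatically rather than hand-escaping it.
def dependency_annotation(application_name, suffix="postgresql"):
    payload = [{"name": f"{application_name}-{suffix}", "kind": "Service"}]
    return {"service.alpha.openshift.io/dependencies": json.dumps(payload)}

print(dependency_annotation("jws-app"))
# {'service.alpha.openshift.io/dependencies': '[{"name": "jws-app-postgresql", "kind": "Service"}]'}
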
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index cd5bb9fa4..61a3208e4 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -5,114 +5,134 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications built using S2I.",
- "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + PostgreSQL (Ephemeral with https)"
},
"name": "jws30-tomcat7-postgresql-s2i"
},
"labels": {
"template": "jws30-tomcat7-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -120,6 +140,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -127,6 +148,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -134,6 +156,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -141,6 +164,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +172,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -155,10 +180,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -182,7 +228,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -206,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -308,11 +356,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -401,7 +459,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -525,7 +583,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json
index cb1e49d29..75d08e99d 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -6,46 +6,54 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat8,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 (no https)"
},
"name": "jws30-tomcat8-basic-s2i"
},
"labels": {
"template": "jws30-tomcat8-basic-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -53,6 +61,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,6 +69,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -67,6 +77,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -74,10 +85,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -156,11 +181,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -248,7 +283,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
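
Every BuildConfig's sourceStrategy now forwards MAVEN_MIRROR_URL and ARTIFACT_DIR as build environment variables, so S2I builds can pull dependencies from an internal Maven mirror and select a specific archive directory. The edit is repeated by hand across these templates; a rough helper that applies the same passthrough programmatically (an illustrative assumption, not repository tooling) could look like this:

import json

# Ensure every BuildConfig's sourceStrategy carries the two build-time
# parameters as environment variables, mirroring the manual edits above.
ENV_PASSTHROUGH = [
    {"name": "MAVEN_MIRROR_URL", "value": "${MAVEN_MIRROR_URL}"},
    {"name": "ARTIFACT_DIR", "value": "${ARTIFACT_DIR}"},
]

def ensure_source_strategy_env(template):
    for obj in template.get("objects", []):
        if obj.get("kind") != "BuildConfig":
            continue
        strategy = obj.get("spec", {}).get("strategy", {}).get("sourceStrategy")
        if strategy is None:
            continue
        env = strategy.setdefault("env", [])
        present = {entry["name"] for entry in env}
        env.extend(e for e in ENV_PASSTHROUGH if e["name"] not in present)
    return template

if __name__ == "__main__":
    with open("jws30-tomcat8-basic-s2i.json") as f:  # assumed local copy
        print(json.dumps(ensure_source_strategy_env(json.load(f)), indent=4)[:400])
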
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json
index 21d5662c7..71577bec4 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -6,76 +6,89 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat8,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 (with https)"
},
"name": "jws30-tomcat8-https-s2i"
},
"labels": {
"template": "jws30-tomcat8-https-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -83,6 +96,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -233,11 +263,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -326,7 +366,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
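
The readiness-probe change adds --noproxy '*' to the curl call so that a cluster-wide HTTP proxy configured in the container environment cannot intercept a request to localhost and fail the health check. A hypothetical lint (again an assumption, not repository code) that flags exec probes still curling localhost without --noproxy might look like this:

import json
import sys

# Report any readiness/liveness exec probe that curls localhost but omits
# --noproxy, which is what the probe changes above guard against.
def probes_missing_noproxy(path):
    with open(path) as f:
        template = json.load(f)
    missing = []
    for obj in template.get("objects", []):
        pod_spec = obj.get("spec", {}).get("template", {}).get("spec", {})
        for container in pod_spec.get("containers", []):
            for probe_name in ("readinessProbe", "livenessProbe"):
                command = container.get(probe_name, {}).get("exec", {}).get("command", [])
                joined = " ".join(command)
                if "curl" in joined and "localhost" in joined and "--noproxy" not in joined:
                    missing.append((obj.get("metadata", {}).get("name"), probe_name))
    return missing

if __name__ == "__main__":
    for path in sys.argv[1:]:
        for name, probe in probes_missing_noproxy(path):
            print(f"{path}: {name} {probe} lacks --noproxy")
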
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 34657d826..de86dd83e 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -5,125 +5,147 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MongoDB (Persistent with https)"
},
"name": "jws30-tomcat8-mongodb-persistent-s2i"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"labels": {
"template": "jws30-tomcat8-mongodb-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -131,6 +153,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -138,6 +161,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,6 +169,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -152,6 +177,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +185,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -166,6 +193,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -173,10 +201,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -200,7 +249,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -224,7 +274,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -326,11 +377,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -419,7 +480,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -547,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index 974cfaddb..6dc85e226 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -5,119 +5,140 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications built using S2I.",
- "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MongoDB (Ephemeral with https)"
},
"name": "jws30-tomcat8-mongodb-s2i"
},
"labels": {
"template": "jws30-tomcat8-mongodb-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +146,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,6 +154,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -139,6 +162,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -146,6 +170,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -153,6 +178,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -160,6 +186,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -167,10 +194,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -194,7 +242,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -218,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -320,11 +370,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -413,7 +473,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -541,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index 7a8231cc5..0e96b58a9 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -5,135 +5,159 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MySQL (Persistent with https)"
},
"name": "jws30-tomcat8-mysql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat8-mysql-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -141,6 +165,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +173,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -176,10 +205,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -203,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -227,7 +278,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -329,11 +381,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -422,7 +484,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -546,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index cda21f237..08b040863 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -5,129 +5,152 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications built using S2I.",
- "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MySQL (Ephemeral with https)"
},
"name": "jws30-tomcat8-mysql-s2i"
},
"labels": {
"template": "jws30-tomcat8-mysql-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -135,6 +158,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -142,6 +166,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,10 +198,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -197,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -221,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -323,11 +374,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -416,7 +477,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -540,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index 4dfc98015..f117e6624 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -5,120 +5,141 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + PostgreSQL (Persistent with https)"
},
"name": "jws30-tomcat8-postgresql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat8-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -126,6 +147,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +155,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +163,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -147,6 +171,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -154,6 +179,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -161,10 +187,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -188,7 +235,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -212,7 +260,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -314,11 +363,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -407,7 +466,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -531,7 +590,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index f6c85668c..faece1269 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -5,114 +5,134 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications built using S2I.",
- "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + (PostgreSQL with https)"
},
"name": "jws30-tomcat8-postgresql-s2i"
},
"labels": {
"template": "jws30-tomcat8-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -120,6 +140,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -127,6 +148,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -134,6 +156,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -141,6 +164,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +172,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -155,10 +180,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -182,7 +228,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -206,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -308,11 +356,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -400,7 +458,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -524,7 +582,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-basic-s2i.json
new file mode 100644
index 000000000..6db6e8cc6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-basic-s2i.json
@@ -0,0 +1,319 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 (no https)"
+ },
+ "name": "jws31-tomcat7-basic-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
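A rough usage sketch for the S2I template above (illustrative, not taken from this patch): once the openshift_examples role has registered the file in the cluster, it can be instantiated per project with oc. The file name jws31-tomcat7-basic-s2i.json, the application name myapp, and the Maven mirror URL below are assumed examples, and exact flag spellings (-p vs. -v for oc process) vary between oc client versions.

# Instantiate from a local copy of the template, overriding selected parameters.
# The file name here is an assumed example, not taken from this patch.
oc process -f jws31-tomcat7-basic-s2i.json \
    -p APPLICATION_NAME=myapp \
    -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/ \
  | oc create -f -

# Or, once the template has been loaded into the openshift namespace:
oc new-app --template=jws31-tomcat7-basic-s2i -p APPLICATION_NAME=myapp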
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-https-s2i.json
new file mode 100644
index 000000000..fd5fca316
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-https-s2i.json
@@ -0,0 +1,438 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 (with https)"
+ },
+ "name": "jws31-tomcat7-https-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
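The https variant above expects, per its own message, a jws-service-account service account and a secret (default name jws-app-secret) containing the server.crt/server.key files before the pods can mount their certificate volume. A minimal sketch using a throwaway self-signed certificate; production deployments would use properly issued certificates, and older oc clients provide the equivalent oc secrets new command instead of oc create secret generic.

# Self-signed certificate for testing only; the CN is an arbitrary assumption.
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
    -subj '/CN=jws-app.example.com' -keyout server.key -out server.crt

# Objects the template's message asks for, using the template's default names.
oc create serviceaccount jws-service-account
oc create secret generic jws-app-secret \
    --from-file=server.crt --from-file=server.key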
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..6bbea8ab8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json
@@ -0,0 +1,715 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MongoDB (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
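The persistent variant above also creates a PersistentVolumeClaim named ${APPLICATION_NAME}-mongodb-claim sized by VOLUME_CAPACITY (default 512Mi), so the cluster needs a matching PersistentVolume or a dynamic provisioner. A quick sketch, with APPLICATION_NAME=todolist chosen purely for illustration:

# Confirm storage is available, instantiate, then verify the claim binds.
oc get pv
oc new-app --template=jws31-tomcat7-mongodb-persistent-s2i \
    -p APPLICATION_NAME=todolist -p VOLUME_CAPACITY=1Gi
oc get pvc todolist-mongodb-claim    # expect STATUS=Bound once a volume matches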
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-s2i.json
new file mode 100644
index 000000000..a565ee4c0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mongodb-s2i.json
@@ -0,0 +1,674 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MongoDB (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
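In the ephemeral variant above, DB_USERNAME, DB_PASSWORD and DB_ADMIN_PASSWORD are declared with "generate": "expression", so every instantiation receives fresh random credentials that are injected into both deployment configs as environment variables. A sketch for reading them back, again with an assumed APPLICATION_NAME=todolist (older oc clients use oc env rather than oc set env):

oc new-app --template=jws31-tomcat7-mongodb-s2i -p APPLICATION_NAME=todolist
# The generated values land in the web and database deployment configs:
oc set env dc/todolist --list | grep '^DB_'
oc set env dc/todolist-mongodb --list | grep '^MONGODB_'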
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json
new file mode 100644
index 000000000..be6899958
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json
@@ -0,0 +1,718 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MySQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-s2i.json
new file mode 100644
index 000000000..2983cc905
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-mysql-s2i.json
@@ -0,0 +1,677 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MySQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..cc5ea452c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json
@@ -0,0 +1,692 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + PostgreSQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-s2i.json
new file mode 100644
index 000000000..bd23e1558
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat7-postgresql-s2i.json
@@ -0,0 +1,651 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-basic-s2i.json
new file mode 100644
index 000000000..f3a5786f6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-basic-s2i.json
@@ -0,0 +1,319 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 (no https)"
+ },
+ "name": "jws31-tomcat8-basic-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-https-s2i.json
new file mode 100644
index 000000000..634948a80
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-https-s2i.json
@@ -0,0 +1,438 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 (with https)"
+ },
+ "name": "jws31-tomcat8-https-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..1ad60d8cc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json
@@ -0,0 +1,715 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MongoDB (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-mongodb-persistent-s2i"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "labels": {
+ "template": "jws31-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-s2i.json
new file mode 100644
index 000000000..f3e918afc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mongodb-s2i.json
@@ -0,0 +1,674 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MongoDB (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat8-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json
new file mode 100644
index 000000000..08b456440
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json
@@ -0,0 +1,718 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MySQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
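Reviewer note: each of these JWS templates expects the "jws-service-account" service account and an HTTPS secret (default name jws-app-secret in these templates) to exist before instantiation, as the template message states. A minimal setup sketch with the oc CLI, assuming you are logged in to the target project and have a server.crt/server.key pair in the current directory (file names match the JWS_HTTPS_CERTIFICATE and JWS_HTTPS_CERTIFICATE_KEY defaults):

    # service account referenced by spec.serviceAccountName in the web server DeploymentConfig
    oc create serviceaccount jws-service-account
    # secret mounted at /etc/jws-secret-volume; the file names become the secret keys
    oc create secret generic jws-app-secret --from-file=server.crt --from-file=server.key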
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-s2i.json
new file mode 100644
index 000000000..260515b73
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-mysql-s2i.json
@@ -0,0 +1,677 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MySQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat8-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
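Reviewer note: once the template is imported (into the current project or the shared openshift namespace), it can be instantiated by the name given in its metadata. A hedged example; APPLICATION_NAME=todolist is an arbitrary illustration, and the repository/context values are simply the template defaults shown above:

    # ephemeral MySQL variant with all defaults
    oc new-app --template=jws31-tomcat8-mysql-s2i
    # or with selected parameters overridden
    oc new-app --template=jws31-tomcat8-mysql-s2i \
        -p APPLICATION_NAME=todolist \
        -p SOURCE_REPOSITORY_URL=https://github.com/jboss-openshift/openshift-quickstarts \
        -p CONTEXT_DIR=todolist/todolist-jdbc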
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..eef5b6939
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json
@@ -0,0 +1,692 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + PostgreSQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
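Reviewer note: in the persistent variant the PVC size and database image tag are ordinary template parameters, so they can be overridden at processing time. A sketch that renders the template straight from this checked-in file with a reasonably recent oc client; the 1Gi value is only an example override of the 512Mi default:

    # render the template from the file and create the resulting objects
    oc process -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json \
        -p VOLUME_CAPACITY=1Gi \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5 \
        | oc create -f -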
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-s2i.json
new file mode 100644
index 000000000..07ef7218a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws31-tomcat8-postgresql-s2i.json
@@ -0,0 +1,649 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + (PostgreSQL with https)"
+ },
+ "name": "jws31-tomcat8-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
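Reviewer note: the readiness probe in these templates shells out to the Tomcat manager jmxproxy endpoint with the generated admin credentials; the same credentials are also exported as JWS_ADMIN_USERNAME/JWS_ADMIN_PASSWORD env vars in the container, so the check can be reproduced by hand. A sketch, assuming the default APPLICATION_NAME of jws-app:

    # run the probe's curl inside a running web server pod; the env vars expand in-container
    oc rsh dc/jws-app bash -c 'curl --noproxy "*" -s -u "$JWS_ADMIN_USERNAME:$JWS_ADMIN_PASSWORD" "http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName"'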
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json
index 143e16756..a48e204ae 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -6,13 +6,14 @@
"iconClass": "icon-jboss",
"description": "Application template for Java applications built using S2I.",
"tags": "java,xpaas",
- "version": "1.0.0"
+ "version": "1.1.0",
+ "openshift.io/display-name": "Red Hat OpenJDK 8"
},
"name": "openjdk18-web-basic-s2i"
},
"labels": {
"template": "openjdk18-web-basic-s2i",
- "xpaas": "1.0.0"
+ "xpaas": "1.4.0"
},
"message": "A new java application has been created in your project.",
"parameters": [
@@ -155,7 +156,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-openjdk18-openshift:1.0"
+ "name": "redhat-openjdk18-openshift:1.1"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
index 1dea463ac..d1705c88c 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + MySQL (Persistent with https)"
},
"name": "processserver63-amq-mysql-persistent-s2i"
},
"labels": {
"template": "processserver63-amq-mysql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,144 +54,168 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -192,6 +223,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -199,46 +231,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -246,6 +287,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -253,18 +295,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -272,6 +317,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -279,10 +325,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -306,7 +373,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -330,7 +398,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -460,13 +529,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -788,7 +865,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -919,7 +996,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
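
Note on the hunks above: the persistent MySQL + A-MQ template gains MAVEN_MIRROR_URL, ARTIFACT_DIR and MYSQL_IMAGE_STREAM_TAG parameters, and the builder, MySQL and A-MQ image stream tags are pinned instead of tracking a floating tag. A minimal sketch of instantiating the updated template with the new parameters overridden (the local file path and the mirror URL are placeholders, not part of this patch):

    # Render the template with explicit values for the new parameters and create the objects
    $ oc process -f processserver63-amq-mysql-persistent-s2i.json \
        -p MYSQL_IMAGE_STREAM_TAG=5.7 \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/ \
        | oc create -f -
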
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json
index 42264585b..665cb76a3 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + MySQL (Ephemeral with https)"
},
"name": "processserver63-amq-mysql-s2i"
},
"labels": {
"template": "processserver63-amq-mysql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,132 +54,154 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -180,6 +209,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -187,46 +217,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -234,6 +273,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -241,18 +281,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -260,6 +303,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -267,10 +311,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -294,7 +359,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -318,7 +384,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -448,13 +515,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -740,7 +815,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -837,7 +912,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
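
The service.alpha.openshift.io/dependencies annotation added to the http and https services above is consumed by the web console to group the generated MySQL and A-MQ services under the application. As an illustration only, with the default APPLICATION_NAME of kie-app the rendered annotation is equivalent to setting it by hand on an existing service:

    # Hypothetical manual equivalent of the annotation the template now renders
    $ oc annotate service kie-app --overwrite \
        'service.alpha.openshift.io/dependencies=[{"name": "kie-app-mysql", "kind": "Service"},{"name": "kie-app-amq-tcp", "kind": "Service"}]'
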
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
index f6d0c99ed..5a395a0f3 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + PostgreSQL (Persistent with https)"
},
"name": "processserver63-amq-postgresql-persistent-s2i"
},
"labels": {
"template": "processserver63-amq-postgresql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,144 +54,168 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -192,6 +223,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -199,31 +231,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -231,6 +269,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -238,18 +277,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -257,6 +299,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -264,10 +307,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -291,7 +355,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -315,7 +380,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -445,13 +511,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -773,7 +847,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -892,7 +966,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
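
The persistent PostgreSQL variant above now references postgresql:${POSTGRESQL_IMAGE_STREAM_TAG} (default 9.5) and jboss-processserver63-openshift:1.4 rather than floating tags. A quick way to confirm those tags exist before instantiating the template, assuming the standard image streams live in the openshift namespace:

    # Both commands only read image stream tags; nothing is created
    $ oc get istag postgresql:9.5 -n openshift
    $ oc get istag jboss-processserver63-openshift:1.4 -n openshift
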
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json
index 41c726cf0..e7c5efdc9 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + PostgreSQL (Ephemeral with https)"
},
"name": "processserver63-amq-postgresql-s2i"
},
"labels": {
"template": "processserver63-amq-postgresql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,132 +54,154 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -180,6 +209,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -187,31 +217,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -219,6 +255,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -226,18 +263,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -245,6 +285,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -252,10 +293,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -279,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -303,7 +366,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -433,13 +497,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -725,7 +797,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -810,7 +882,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
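
Several of the parameters above (DB_USERNAME, DB_PASSWORD, MQ_PASSWORD and the webhook secrets) carry a "from" regex and are generated when the template is processed. A small sketch for inspecting every parameter, generated or not, without creating any objects; the file path is a placeholder:

    # Print the parameter table (name, description, generator, value) for review
    $ oc process --parameters -f processserver63-amq-postgresql-s2i.json
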
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json
index 170c919cb..e70d20a6e 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,javaee,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server (no https)"
},
"name": "processserver63-basic-s2i"
},
"labels": {
"template": "processserver63-basic-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,54 +40,63 @@
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.H2Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -190,13 +220,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
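
The basic (no https) template above also picks up MAVEN_MIRROR_URL and ARTIFACT_DIR, which are passed straight into the S2I build environment. A rough usage sketch, assuming the template has already been created in the current project and using placeholder values:

    # Instantiate the template and point the S2I build at an internal Maven mirror
    $ oc new-app --template=processserver63-basic-s2i \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/ \
        -p ARTIFACT_DIR=processserver/library/target
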
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json
index 89d0db1a6..f76b07b0b 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + MySQL (Persistent with https)"
},
"name": "processserver63-mysql-persistent-s2i"
},
"labels": {
"template": "processserver63-mysql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,108 +54,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,46 +189,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -210,6 +245,7 @@
"required": true
},
{
+            "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -217,6 +253,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -381,13 +441,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -681,7 +749,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
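The "service.alpha.openshift.io/dependencies" annotation added to the http and https services above is itself a JSON array serialized into a plain string, which is easy to mis-escape by hand. The following is only an illustrative sketch (the variable names are made up and the snippet is not part of the templates) of how the escaped value seen in these hunks can be produced and round-tripped with Python's standard json module:

    import json

    # The dependencies annotation is a JSON array encoded as a string, so it
    # must be serialized separately from the surrounding template document.
    dependencies = [{"name": "${APPLICATION_NAME}-mysql", "kind": "Service"}]

    annotations = {
        "description": "The web server's http port.",
        # json.dumps() yields the escaped string form that appears in the diff.
        "service.alpha.openshift.io/dependencies": json.dumps(dependencies),
    }

    # Round-trip check: the value must parse back into a list of
    # {"name", "kind"} objects for the dependency to be recognized.
    parsed = json.loads(annotations["service.alpha.openshift.io/dependencies"])
    assert parsed[0]["kind"] == "Service"
    print(annotations["service.alpha.openshift.io/dependencies"])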
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json
index 26cab29f8..a3be02eab 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + MySQL (Ephemeral with https)"
},
"name": "processserver63-mysql-s2i"
},
"labels": {
"template": "processserver63-mysql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,102 +54,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -150,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -157,46 +182,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -204,6 +238,7 @@
"required": true
},
{
+            "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -211,6 +246,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -218,10 +254,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -245,7 +302,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -269,7 +327,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -375,13 +434,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -639,7 +706,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
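The hunks above replace the floating "mysql:latest" ImageStreamTag reference with the parameterized "mysql:${MYSQL_IMAGE_STREAM_TAG}" form, so deployments track an explicit major.minor tag instead of whatever "latest" currently points at. As a rough illustration of that cleanup (the find_latest_tags helper is hypothetical and not shipped in this repository), a template file can be scanned for any remaining ":latest" image stream references like this:

    import json
    import sys

    # Walk a template and report ImageStreamTag references still pinned to
    # ":latest" -- the pattern the diff above parameterizes away.
    def find_latest_tags(node, path="$"):
        if isinstance(node, dict):
            if node.get("kind") == "ImageStreamTag" and str(node.get("name", "")).endswith(":latest"):
                yield path, node["name"]
            for key, value in node.items():
                yield from find_latest_tags(value, f"{path}.{key}")
        elif isinstance(node, list):
            for i, value in enumerate(node):
                yield from find_latest_tags(value, f"{path}[{i}]")

    if __name__ == "__main__":
        template = json.load(open(sys.argv[1]))
        for path, name in find_latest_tags(template):
            print(f"unpinned image stream tag {name} at {path}")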
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json
index 32a512829..361b177f9 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + PostgreSQL (Persistent with https)"
},
"name": "processserver63-postgresql-persistent-s2i"
},
"labels": {
"template": "processserver63-postgresql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+    "message": "A new persistent BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,108 +54,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,31 +189,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,6 +227,7 @@
"required": true
},
{
+            "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -202,6 +235,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -366,13 +423,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -666,7 +731,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
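Throughout these templates, object fields refer to parameters with the ${NAME} syntax, e.g. "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}" or "${APPLICATION_NAME}-postgresql", and the values are filled in when the template is processed on the cluster. The snippet below is only a minimal client-side sketch of that substitution for a single string field (the substitute helper and the sample parameter values are assumptions for illustration, not the server's implementation):

    import re

    # Replace each ${NAME} placeholder with the matching parameter value,
    # leaving unknown names untouched.
    PLACEHOLDER = re.compile(r"\$\{([A-Z0-9_]+)\}")

    def substitute(text, params):
        return PLACEHOLDER.sub(lambda m: params.get(m.group(1), m.group(0)), text)

    params = {"APPLICATION_NAME": "kie-app", "POSTGRESQL_IMAGE_STREAM_TAG": "9.5"}
    print(substitute("postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}", params))  # postgresql:9.5
    print(substitute("${APPLICATION_NAME}-postgresql", params))             # kie-app-postgresql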
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json
index 55e2199bb..451915a1d 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + PostgreSQL (Ephemeral with https)"
},
"name": "processserver63-postgresql-s2i"
},
"labels": {
"template": "processserver63-postgresql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+    "message": "A new BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,102 +54,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -150,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -157,31 +182,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -189,6 +220,7 @@
"required": true
},
{
+            "displayName": "GitHub Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -196,6 +228,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -203,10 +236,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -230,7 +284,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -254,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,13 +416,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -624,7 +688,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
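Several parameters in these templates (KIE_SERVER_PASSWORD, DB_USERNAME, DB_PASSWORD, the webhook secrets, HORNETQ_CLUSTER_PASSWORD) declare "generate": "expression" with a "from" pattern such as "[a-zA-Z0-9]{8}", so a random matching value is produced at processing time when none is supplied. The stand-in below only mimics that behaviour for the simple character-class patterns used here; the generate function is a hypothetical sketch, not OpenShift's expression generator:

    import random
    import re
    import string

    # Character classes appearing in the "from" patterns of these templates.
    CLASSES = {
        "a-zA-Z0-9": string.ascii_letters + string.digits,
        "a-zA-Z": string.ascii_letters,
        "0-9": string.digits,
    }

    def generate(expression):
        out = []
        for chars, count, literal in re.findall(r"\[([^\]]+)\]\{(\d+)\}|([^\[]+)", expression):
            if literal:
                out.append(literal)  # fixed text, e.g. the trailing "!" or the "user" prefix
            else:
                pool = CLASSES[chars]
                out.append("".join(random.choice(pool) for _ in range(int(count))))
        return "".join(out)

    print(generate("[a-zA-Z0-9]{8}"))        # e.g. k3WqT9za
    print(generate("[a-zA-Z]{6}[0-9]{1}!"))  # e.g. qwErTy4!
    print(generate("user[a-zA-Z0-9]{3}"))    # e.g. user7Kq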
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json
new file mode 100644
index 000000000..293d04d63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json
@@ -0,0 +1,1156 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + MySQL (Persistent with https)"
+ },
+ "name": "processserver64-amq-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+      "displayName": "GitHub Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
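All of the process server templates in this patch expect the same two prerequisites that their message text calls out: a "processserver-service-account" service account and a keystore secret (by default "processserver-app-secret" holding "keystore.jks"). A minimal sketch with the oc CLI, assuming the keystore file has already been generated elsewhere:

    # service account and HTTPS keystore secret, using the template defaults
    oc create serviceaccount processserver-service-account
    oc create secret generic processserver-app-secret --from-file=keystore.jks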
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-s2i.json
new file mode 100644
index 000000000..760940b36
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-mysql-s2i.json
@@ -0,0 +1,1034 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + MySQL (Ephemeral with https)"
+ },
+ "name": "processserver64-amq-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
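Because processserver64-amq-mysql-s2i.json above defaults AMQ_MESH_DISCOVERY_TYPE to "kube", the service account used by the A-MQ pod needs the "view" role, as the parameter description notes. A minimal sketch of granting that role (using the command quoted in the description, with the real project substituted for <namespace>) and then instantiating the template with an example image stream tag:

    oc policy add-role-to-user view system:serviceaccount:<namespace>:default
    oc process -f processserver64-amq-mysql-s2i.json -p APPLICATION_NAME=kie-app -p MYSQL_IMAGE_STREAM_TAG=5.7 | oc create -f -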
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..1603bccff
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json
@@ -0,0 +1,1126 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + PostgreSQL (Persistent with https)"
+ },
+ "name": "processserver64-amq-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
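In processserver64-amq-postgresql-persistent-s2i.json above, DB_SERVICE_PREFIX_MAPPING maps the single ${APPLICATION_NAME}-postgresql service onto both the DB and QUARTZ prefixes, and QUARTZ_JNDI is derived by appending "NotManaged" to DB_JNDI, so with the default DB_JNDI the Quartz datasource resolves to java:jboss/datasources/ExampleDSNotManaged. A minimal instantiation sketch; the 1Gi value is only an example override of the 512Mi VOLUME_CAPACITY default:

    oc process -f processserver64-amq-postgresql-persistent-s2i.json -p APPLICATION_NAME=kie-app -p VOLUME_CAPACITY=1Gi | oc create -f -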
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-s2i.json
new file mode 100644
index 000000000..422f51c11
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-amq-postgresql-s2i.json
@@ -0,0 +1,1004 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "processserver64-amq-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
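Every object definition in the template above (Services, Routes, BuildConfig, DeploymentConfigs) refers to the parameters through ${NAME} placeholders; `oc process` performs the real substitution server-side when the template is instantiated. Purely to make the JSON easier to read by hand, the sketch below naively approximates that substitution; it is not part of the patch, and the file name assumes the template above has been saved locally under its basename.

    # Naive illustration of how ${PARAM} placeholders in "objects" are resolved.
    # The real rendering is done by `oc process`; this only approximates it so
    # the template above is easier to read. File name assumes a local copy.
    import json
    import re

    def render(template, overrides):
        # Start from parameter defaults, then apply caller-supplied overrides.
        values = {p["name"]: p.get("value", "") for p in template["parameters"]}
        values.update(overrides)
        text = json.dumps(template["objects"])
        text = re.sub(r"\$\{(\w+)\}",
                      lambda m: values.get(m.group(1), m.group(0)), text)
        return json.loads(text)

    with open("processserver64-amq-postgresql-s2i.json") as f:
        tpl = json.load(f)

    for obj in render(tpl, {"APPLICATION_NAME": "kie-app"}):
        print(obj["kind"], obj["metadata"]["name"])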
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-basic-s2i.json
new file mode 100644
index 000000000..2bf15ff25
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-basic-s2i.json
@@ -0,0 +1,383 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server (no https)"
+ },
+ "name": "processserver64-basic-s2i"
+ },
+ "labels": {
+ "template": "processserver64-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.H2Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
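Several parameters above (GITHUB_WEBHOOK_SECRET, GENERIC_WEBHOOK_SECRET, HORNETQ_CLUSTER_PASSWORD, and the KIE server password) use "generate": "expression" with a "from" pattern; when the template is processed, the server fills them with a random value matching that pattern. As an illustration of the simple fixed-length alphanumeric case only (OpenShift does this generation itself), a local equivalent could look like:

    # Illustration only: generate an 8-character alphanumeric value, the shape
    # produced for parameters declared with "from": "[a-zA-Z0-9]{8}". OpenShift
    # generates these server-side when the template is processed.
    import random
    import string

    def generate_simple(length=8):
        rng = random.SystemRandom()  # use a CSPRNG since these values become secrets
        alphabet = string.ascii_letters + string.digits
        return "".join(rng.choice(alphabet) for _ in range(length))

    print(generate_simple())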
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-persistent-s2i.json
new file mode 100644
index 000000000..4673dfb0d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-persistent-s2i.json
@@ -0,0 +1,860 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + MySQL (Persistent with https)"
+ },
+ "name": "processserver64-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
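In the persistent MySQL template above, the same DB_* parameters feed both the application container (DB_USERNAME, DB_PASSWORD, DB_DATABASE) and the MySQL container (MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE). The sketch below, again only a review aid and not part of the patch, prints every DeploymentConfig container's environment from the output of `oc process -f <template> -o json` so that pairing is easy to verify; the input path is a placeholder.

    # Review aid only: dump container env from rendered template output so the
    # DB_* -> MYSQL_* pairing can be checked. Expects the v1 List JSON written by
    # `oc process -f <template> -o json`; the path argument is a placeholder.
    import json
    import sys

    def dump_env(items):
        for obj in items:
            if obj.get("kind") != "DeploymentConfig":
                continue
            dc = obj["metadata"]["name"]
            for container in obj["spec"]["template"]["spec"]["containers"]:
                for env in container.get("env", []):
                    value = env.get("value", "<valueFrom>")
                    print("%s/%s: %s=%s" % (dc, container["name"], env["name"], value))

    if __name__ == "__main__":
        with open(sys.argv[1]) as f:
            dump_env(json.load(f).get("items", []))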
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-s2i.json
new file mode 100644
index 000000000..9078f20b8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-mysql-s2i.json
@@ -0,0 +1,783 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + MySQL (Ephemeral with https)"
+ },
+ "name": "processserver64-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
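
The template above expects the "processserver-service-account" service account and the keystore secret (default name "processserver-app-secret", holding "keystore.jks") to exist before the pod can mount the HTTPS volume. A minimal preparation sketch, assuming the oc and keytool CLIs and a project you can edit; the self-signed certificate and its subject are illustrative only, not part of the template:

    # Generate an illustrative self-signed keystore matching the template defaults
    # (alias "jboss", file "keystore.jks", password "mykeystorepass").
    keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks \
        -storepass mykeystorepass -keypass mykeystorepass -dname "CN=kie-app.example.com"

    # Create the service account and keystore secret referenced by the template.
    oc create serviceaccount processserver-service-account
    oc create secret generic processserver-app-secret --from-file=keystore.jks=keystore.jks
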
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..75b6d310e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-persistent-s2i.json
@@ -0,0 +1,830 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + PostgreSQL (Persistent with https)"
+ },
+ "name": "processserver64-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
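
A minimal instantiation sketch for processserver64-postgresql-persistent-s2i.json, assuming the prerequisites above (service account and keystore secret) already exist in the project and the template file is available locally; the parameter overrides are illustrative:

    # Render the template with a few illustrative overrides and create the resulting objects.
    oc process -f processserver64-postgresql-persistent-s2i.json \
        -p APPLICATION_NAME=kie-app \
        -p VOLUME_CAPACITY=1Gi \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5 \
        | oc create -f -
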
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-s2i.json
new file mode 100644
index 000000000..51923c0ad
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver64-postgresql-s2i.json
@@ -0,0 +1,753 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "processserver64-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json
index fb0578a67..5e956f449 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json
@@ -5,110 +5,129 @@
"annotations": {
"description": "Application template for SSO 7.0",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,java,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
},
"name": "sso70-https"
},
"labels": {
"template": "sso70-https",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -116,54 +135,65 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
@@ -283,10 +313,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
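
Note on the parameter changes above: SSO_ADMIN_USERNAME and SSO_ADMIN_PASSWORD move from a fixed admin/admin default to OpenShift's generate/expression mechanism, so each instantiation now receives random eight-character admin credentials. A minimal sketch of inspecting those generated values, assuming the template path shown in the diff header, the default APPLICATION_NAME of "sso", and an OpenShift 3.x oc client (exact flag spellings vary between releases):

    # Render the template locally and look at the generated admin credentials
    # (illustrative; `oc process` emits JSON, so the grep is approximate):
    oc process -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json \
      | grep -A 1 'SSO_ADMIN'

    # After the objects have been created, the values can also be read back from
    # the deployment config's environment (assumes APPLICATION_NAME=sso):
    oc set env dc/sso --list | grep SSO_ADMIN
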
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json
index dcbb24bf1..0fb2703c7 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json
@@ -5,123 +5,145 @@
"annotations": {
"description": "Application template for SSO 7.0 MySQL applications with persistent storage",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + MySQL (Persistent)"
},
"name": "sso70-mysql-persistent"
},
"labels": {
"template": "sso70-mysql-persistent",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -129,6 +151,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -136,36 +159,42 @@
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,58 +202,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -248,7 +295,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +320,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -364,10 +413,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -639,7 +688,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
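
Note on the hunks above: the MySQL deployment no longer tracks the floating mysql:latest image stream tag; it now references mysql:${MYSQL_IMAGE_STREAM_TAG}, which the new parameter defaults to 5.7. A minimal sketch of overriding that tag at instantiation time, assuming the template path from the diff header, that the chosen tag exists in the openshift namespace, and an oc client whose process command accepts -p/--param (older releases used -v); the values are illustrative:

    # Instantiate the persistent MySQL variant pinned to a specific database version:
    oc process -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json \
      -p MYSQL_IMAGE_STREAM_TAG=5.6 \
      -p APPLICATION_NAME=sso \
      | oc create -f -
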
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json
index 1768f7a1b..9beae806b 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json
@@ -5,123 +5,145 @@
"annotations": {
"description": "Application template for SSO 7.0 MySQL applications",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + MySQL (Ephemeral)"
},
"name": "sso70-mysql"
},
"labels": {
"template": "sso70-mysql",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -129,6 +151,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -136,30 +159,35 @@
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -167,58 +195,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -240,10 +286,11 @@
"name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}",
- "component": "server"
+ "component": "server"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -268,7 +315,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,7 +397,7 @@
"name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}",
- "component": "server"
+ "component": "server"
}
},
"spec": {
@@ -364,10 +412,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -641,7 +689,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json
index 4c2f81f2e..e22399351 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json
@@ -5,108 +5,127 @@
"annotations": {
"description": "Application template for SSO 7.0 PostgreSQL applications with persistent storage",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + PostgreSQL (Persistent)"
},
"name": "sso70-postgresql-persistent"
},
"labels": {
"template": "sso70-postgresql-persistent",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -114,6 +133,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +141,42 @@
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,58 +184,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -233,7 +277,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -257,7 +302,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,10 +395,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -624,7 +670,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json
index d8402ef72..aa8ebaa8e 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json
@@ -5,108 +5,127 @@
"annotations": {
"description": "Application template for SSO 7.0 PostgreSQL applications",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + PostgreSQL (Ephemeral)"
},
"name": "sso70-postgresql"
},
"labels": {
"template": "sso70-postgresql",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -114,6 +133,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -121,30 +141,35 @@
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -152,58 +177,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -228,7 +271,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -253,7 +297,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,10 +394,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -626,7 +671,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-https.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-https.json
new file mode 100644
index 000000000..bee86d7c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-https.json
@@ -0,0 +1,544 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "name": "sso71-https"
+ },
+ "labels": {
+ "template": "sso71-https",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+      "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+      "description": "The username used to access the SSO service. This is used by clients to create the application client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
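
Note on the new sso71-https.json template above: its message annotation assumes the ${SERVICE_ACCOUNT_NAME} service account and the HTTPS/JGroups/truststore secrets already exist before the pods will start. A minimal sketch of preparing them with the template's default names (sso-service-account, sso-app-secret, keystore.jks, jgroups.jceks); the keytool aliases, passwords, and the use of a self-signed certificate are illustrative, and secret-handling commands differ slightly between oc releases:

    # Example keystores: a self-signed HTTPS keystore plus a JCEKS keystore for JGroups.
    keytool -genkeypair -alias jboss -keyalg RSA -dname "CN=sso.example.com" \
      -keystore keystore.jks -storepass mykeystorepass -keypass mykeystorepass
    keytool -genseckey -alias secret-key -keyalg AES -keysize 128 -storetype JCEKS \
      -keystore jgroups.jceks -storepass password

    # Service account and secret matching the template defaults, then instantiation:
    oc create serviceaccount sso-service-account
    oc create secret generic sso-app-secret --from-file=keystore.jks --from-file=jgroups.jceks
    oc secrets link sso-service-account sso-app-secret
    oc process -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-https.json \
      | oc create -f -
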
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql-persistent.json
new file mode 100644
index 000000000..49b37f348
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql-persistent.json
@@ -0,0 +1,799 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 MySQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + MySQL (Persistent)"
+ },
+ "name": "sso71-mysql-persistent"
+ },
+ "labels": {
+ "template": "sso71-mysql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+      "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
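Several of the parameters in the template above (DB_USERNAME, DB_PASSWORD, SSO_ADMIN_USERNAME, SSO_ADMIN_PASSWORD, JGROUPS_CLUSTER_PASSWORD) are generated from expression patterns such as user[a-zA-Z0-9]{3}. A simplified Python sketch of that kind of expansion follows; it handles only literal prefixes plus [chars]{n} groups and is not the real OpenShift generator.

import re
import secrets

def expand(pattern: str) -> str:
    # Expand a pattern like "user[a-zA-Z0-9]{3}" into a random string.
    out = []
    pos = 0
    for m in re.finditer(r"\[([^\]]+)\]\{(\d+)\}", pattern):
        out.append(pattern[pos:m.start()])              # literal prefix, e.g. "user"
        charclass, count = m.group(1), int(m.group(2))
        alphabet = ""
        i = 0
        while i < len(charclass):                        # expand a-z style ranges
            if i + 2 < len(charclass) and charclass[i + 1] == "-":
                lo, hi = charclass[i], charclass[i + 2]
                alphabet += "".join(chr(c) for c in range(ord(lo), ord(hi) + 1))
                i += 3
            else:
                alphabet += charclass[i]
                i += 1
        out.append("".join(secrets.choice(alphabet) for _ in range(count)))
        pos = m.end()
    out.append(pattern[pos:])
    return "".join(out)

print(expand("user[a-zA-Z0-9]{3}"))   # e.g. "userX4q"
print(expand("[a-zA-Z0-9]{8}"))       # e.g. "k3JfP0qZ"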
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql.json
new file mode 100644
index 000000000..634a75bab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-mysql.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 MySQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + MySQL (Ephemeral)"
+ },
+ "name": "sso71-mysql"
+ },
+ "labels": {
+ "template": "sso71-mysql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
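The service.alpha.openshift.io/dependencies annotations used by the services in these templates hold a JSON array serialized into a string. After parameter substitution it can be read back trivially; the substituted value below assumes APPLICATION_NAME=sso.

import json

annotation = '[{"name": "sso-mysql", "kind": "Service"}]'  # value after ${APPLICATION_NAME} substitution
for dep in json.loads(annotation):
    print(dep["kind"], dep["name"])    # -> Service sso-mysql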
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql-persistent.json
new file mode 100644
index 000000000..c53bb9d5b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql-persistent.json
@@ -0,0 +1,773 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 PostgreSQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + PostgreSQL (Persistent)"
+ },
+ "name": "sso71-postgresql-persistent"
+ },
+ "labels": {
+ "template": "sso71-postgresql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
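For reference, parameters such as ${VOLUME_CAPACITY} and ${POSTGRESQL_MAX_CONNECTIONS} are substituted by oc process before the objects above are created; note that POSTGRESQL_MAX_CONNECTIONS deliberately feeds both the max-connections and max-prepared-transactions environment variables. A much-simplified sketch of the substitution step follows; it is illustrative only, and real processing also handles generated parameters, required checks and non-string ${{...}} values.

import json
import re

def process(template: dict, values: dict) -> list:
    # Collect parameter defaults, let caller-supplied values override them,
    # then textually replace ${NAME} placeholders throughout the objects.
    defaults = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
    defaults.update(values)
    raw = json.dumps(template["objects"])
    raw = re.sub(r"\$\{([A-Z0-9_]+)\}",
                 lambda m: defaults.get(m.group(1), m.group(0)), raw)
    return json.loads(raw)

# Hypothetical usage against the template above:
# objs = process(json.load(open("sso71-postgresql-persistent.json")),
#                {"APPLICATION_NAME": "sso"})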
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql.json
new file mode 100644
index 000000000..c1fc41eda
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso71-postgresql.json
@@ -0,0 +1,741 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 PostgreSQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + PostgreSQL (Ephemeral)"
+ },
+ "name": "sso71-postgresql"
+ },
+ "labels": {
+ "template": "sso71-postgresql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
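
The template's completion message above assumes the service account and the keystore secrets already exist in the project. The following is a minimal sketch of those prerequisite objects, using the parameter defaults (sso-service-account, sso-app-secret, keystore.jks, jgroups.jceks); the empty data values are placeholders for real base64-encoded keystores, and the sketch is illustrative rather than part of the template:

apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: sso-service-account        # SERVICE_ACCOUNT_NAME default
- apiVersion: v1
  kind: Secret
  metadata:
    name: sso-app-secret             # default for HTTPS_SECRET, JGROUPS_ENCRYPT_SECRET and SSO_TRUSTSTORE_SECRET
  type: Opaque
  data:
    keystore.jks: ""                 # placeholder for the base64-encoded JKS keystore (HTTPS_KEYSTORE)
    jgroups.jceks: ""                # placeholder for the base64-encoded JCEKS keystore (JGROUPS_ENCRYPT_KEYSTORE)
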
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml
deleted file mode 100644
index 14bdd1dca..000000000
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: cloudforms
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes-app
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml
deleted file mode 100644
index 709d8d976..000000000
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: nfs-pv01
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml
deleted file mode 100644
index 34f5fcbcc..000000000
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml
+++ /dev/null
@@ -1,149 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: 3scale-gateway
- annotations:
- description: "3scale API Gateway"
- iconClass: "icon-load-balancer"
- tags: "api,gateway,3scale"
-objects:
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- replicas: 2
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 25%
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- labels:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- spec:
- containers:
- - env:
- - name: THREESCALE_PORTAL_ENDPOINT
- valueFrom:
- secretKeyRef:
- name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
- key: password
- - name: THREESCALE_CONFIG_FILE
- value: ${THREESCALE_CONFIG_FILE}
- - name: RESOLVER
- value: ${RESOLVER}
- - name: APICAST_SERVICES
- value: ${APICAST_SERVICES}
- - name: APICAST_MISSING_CONFIGURATION
- value: ${MISSING_CONFIGURATION}
- - name: APICAST_LOG_LEVEL
- value: ${APICAST_LOG_LEVEL}
- - name: APICAST_PATH_ROUTING_ENABLED
- value: ${PATH_ROUTING}
- - name: APICAST_RESPONSE_CODES
- value: ${RESPONSE_CODES}
- - name: APICAST_REQUEST_LOGS
- value: ${REQUEST_LOGS}
- - name: APICAST_RELOAD_CONFIG
- value: ${APICAST_RELOAD_CONFIG}
- image: ${THREESCALE_GATEWAY_IMAGE}
- imagePullPolicy: Always
- name: ${THREESCALE_GATEWAY_NAME}
- livenessProbe:
- httpGet:
- path: /status/live
- port: 8090
- initialDelaySeconds: 10
- timeoutSeconds: 1
- readinessProbe:
- httpGet:
- path: /status/ready
- port: 8090
- initialDelaySeconds: 15
- timeoutSeconds: 1
- ports:
- - containerPort: 8080
- protocol: TCP
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- securityContext: {}
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
- status: {}
-- apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- ports:
- - name: 8080-tcp
- port: 8080
- protocol: TCP
- targetPort: 8080
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- sessionAffinity: None
- type: ClusterIP
- status:
- loadBalancer: {}
-parameters:
-- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
- value: threescale-portal-endpoint-secret
- name: THREESCALE_PORTAL_ENDPOINT_SECRET
- required: true
-- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
- value:
- name: THREESCALE_CONFIG_FILE
- required: false
-- description: "Name for the 3scale API Gateway"
- value: threescalegw
- name: THREESCALE_GATEWAY_NAME
- required: true
-- description: "Docker image to use."
- value: 'rhamp10/apicast-gateway:1.0.0-4'
- name: THREESCALE_GATEWAY_IMAGE
- required: true
-- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
- value:
- name: RESOLVER
- required: false
-- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
- value:
- name: APICAST_SERVICES
- required: false
-- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
- value: exit
- required: false
- name: MISSING_CONFIGURATION
-- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
- name: APICAST_LOG_LEVEL
- required: false
-- description: "Enable path routing. Experimental feature."
- name: PATH_ROUTING
- required: false
- value: "false"
-- description: "Enable traffic logging to 3scale. Includes whole request and response."
- value: "false"
- name: REQUEST_LOGS
- required: false
-- description: "Enable logging response codes to 3scale."
- value: "false"
- name: RESPONSE_CODES
- required: false
-- description: "Reload config on every request"
- value: "false"
- name: APICAST_RELOAD_CONFIG
- required: false
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json
deleted file mode 100644
index 049f3f884..000000000
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json
+++ /dev/null
@@ -1,397 +0,0 @@
-{
- "kind": "List",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-image-streams",
- "annotations": {
- "description": "ImageStream definitions for JBoss Middleware products."
- }
- },
- "items": [
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-webserver30-tomcat7-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
- "tags": [
- {
- "name": "1.1",
- "annotations": {
- "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
- "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "tomcat-websocket-chat",
- "version": "1.1"
- }
- },
- {
- "name": "1.2",
- "annotations": {
- "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
- "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "tomcat-websocket-chat",
- "version": "1.2"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-webserver30-tomcat8-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
- "tags": [
- {
- "name": "1.1",
- "annotations": {
- "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
- "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "tomcat-websocket-chat",
- "version": "1.1"
- }
- },
- {
- "name": "1.2",
- "annotations": {
- "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
- "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "tomcat-websocket-chat",
- "version": "1.2"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-eap64-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
- "tags": [
- {
- "name": "1.1",
- "annotations": {
- "description": "JBoss EAP 6.4 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
- "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
- "sampleContextDir": "kitchensink",
- "sampleRef": "6.4.x",
- "version": "1.1"
- }
- },
- {
- "name": "1.2",
- "annotations": {
- "description": "JBoss EAP 6.4 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.2",
- "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
- "sampleContextDir": "kitchensink",
- "sampleRef": "6.4.x",
- "version": "1.2"
- }
- },
- {
- "name": "1.3",
- "annotations": {
- "description": "JBoss EAP 6.4 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.3",
- "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
- "sampleContextDir": "kitchensink",
- "sampleRef": "6.4.x",
- "version": "1.3"
- }
- },
- {
- "name": "1.4",
- "annotations": {
- "description": "JBoss EAP 6.4 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:6.4,javaee:6,java:8,xpaas:1.4",
- "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
- "sampleContextDir": "kitchensink",
- "sampleRef": "6.4.x",
- "version": "1.4"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-eap70-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
- "tags": [
- {
- "name": "1.3",
- "annotations": {
- "description": "JBoss EAP 7.0 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
- "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
- "sampleContextDir": "kitchensink",
- "sampleRef": "7.0.0.GA",
- "version": "1.3"
- }
- },
- {
- "name": "1.4",
- "annotations": {
- "description": "JBoss EAP 7.0 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,eap,javaee,java,jboss,xpaas",
- "supports":"eap:7.0,javaee:7,java:8,xpaas:1.4",
- "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
- "sampleContextDir": "kitchensink",
- "sampleRef": "7.0.0.GA",
- "version": "1.4"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-decisionserver62-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift",
- "tags": [
- {
- "name": "1.2",
- "annotations": {
- "description": "Red Hat JBoss BRMS 6.2 decision server S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,decisionserver,java,xpaas",
- "supports":"decisionserver:6.2,java:8,xpaas:1.2",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "decisionserver/hellorules",
- "sampleRef": "1.2",
- "version": "1.2"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-decisionserver63-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift",
- "tags": [
- {
- "name": "1.3",
- "annotations": {
- "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,decisionserver,java,xpaas",
- "supports":"decisionserver:6.3,java:8,xpaas:1.3",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "decisionserver/hellorules",
- "sampleRef": "1.3",
- "version": "1.3"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-processserver63-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift",
- "tags": [
- {
- "name": "1.3",
- "annotations": {
- "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,processserver,java,xpaas",
- "supports":"processserver:6.3,java:8,xpaas:1.3",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
- "sampleContextDir": "processserver/library",
- "sampleRef": "1.3",
- "version": "1.3"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-datagrid65-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift",
- "tags": [
- {
- "name": "1.2",
- "annotations": {
- "description": "JBoss Data Grid 6.5 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "datagrid,java,jboss,xpaas",
- "supports":"datagrid:6.5,java:8,xpaas:1.2",
- "version": "1.2"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-datavirt63-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
- "tags": [
- {
- "name": "1.0",
- "annotations": {
- "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
- "iconClass": "icon-jboss",
- "tags": "datavirt,java,jboss,xpaas",
- "supports":"datavirt:6.3,java:8,xpaas:1.4",
- "version": "1.0"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "jboss-amq-62"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
- "tags": [
- {
- "name": "1.1",
- "annotations": {
- "description": "JBoss A-MQ 6.2 broker image.",
- "iconClass": "icon-jboss",
- "tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.1",
- "version": "1.1"
- }
- },
- {
- "name": "1.2",
- "annotations": {
- "description": "JBoss A-MQ 6.2 broker image.",
- "iconClass": "icon-jboss",
- "tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.2",
- "version": "1.2"
- }
- },
- {
- "name": "1.3",
- "annotations": {
- "description": "JBoss A-MQ 6.2 broker image.",
- "iconClass": "icon-jboss",
- "tags": "messaging,amq,jboss,xpaas",
- "supports":"amq:6.2,messaging,xpaas:1.3",
- "version": "1.3"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "redhat-sso70-openshift",
- "annotations": {
- "description": "Red Hat SSO 7.0"
- }
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso70-openshift",
- "tags": [
- {
- "name": "1.3",
- "annotations": {
- "description": "Red Hat SSO 7.0",
- "iconClass": "icon-jboss",
- "tags": "sso,keycloak,redhat",
- "supports":"sso:7.0,xpaas:1.3",
- "version": "1.3"
- }
- }
- ]
- }
- },
- {
- "kind": "ImageStream",
- "apiVersion": "v1",
- "metadata": {
- "name": "redhat-openjdk18-openshift"
- },
- "spec": {
- "dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
- "tags": [
- {
- "name": "1.0",
- "annotations": {
- "openshift.io/display-name": "Red Hat OpenJDK 8",
- "description": "Build and run Java applications using Maven and OpenJDK 8.",
- "iconClass": "icon-jboss",
- "tags": "builder,java,xpaas,openjdk",
- "supports":"java:8,xpaas:1.0",
- "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
- "sampleContextDir": "undertow-servlet",
- "version": "1.0"
- }
- }
- ]
- }
- }
- ]
-}
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml
new file mode 100644
index 000000000..250a99b8d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv01
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv01
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml
new file mode 100644
index 000000000..cba9bbe35
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv02
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv02
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml
new file mode 100644
index 000000000..c08c21265
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv03
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
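
The three example volumes above (cfme-pv01, cfme-pv02, cfme-pv03) are sized to back the database, region, and server claims made by the CloudForms template that follows. As an illustration only, a claim shaped like the one below would bind to cfme-pv01 by capacity and access mode, assuming neither side sets a storage class; the claim name here is hypothetical, since the template derives its claim names from the NAME and DATABASE_SERVICE_NAME parameters:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cloudforms-postgresql      # hypothetical name for illustration
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 15Gi                # matches the cfme-pv01 capacity above
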
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 4f25a9c8f..3bc6c5813 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
@@ -17,6 +17,7 @@ objects:
service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
name: ${NAME}
spec:
+ clusterIP: None
ports:
- name: http
port: 80
@@ -48,11 +49,27 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms app image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+ dockerImageRepository: "${APPLICATION_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: "${MEMCACHED_IMG_NAME}"
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${DATABASE_SERVICE_NAME}
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- ReadWriteOnce
@@ -62,45 +79,41 @@ objects:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${NAME}
+ name: "${NAME}-region"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
- storage: ${APPLICATION_VOLUME_CAPACITY}
-- apiVersion: v1
- kind: "DeploymentConfig"
+ storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+- apiVersion: apps/v1beta1
+ kind: "StatefulSet"
metadata:
name: ${NAME}
annotations:
description: "Defines how to deploy the CloudForms appliance"
spec:
+ serviceName: "${NAME}"
+ replicas: 1
template:
metadata:
labels:
name: ${NAME}
name: ${NAME}
spec:
- volumes:
- -
- name: "cfme-app-volume"
- persistentVolumeClaim:
- claimName: ${NAME}
containers:
- - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
- imagePullPolicy: IfNotPresent
- name: cloudforms
+ - name: cloudforms
+ image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
livenessProbe:
- httpGet:
- path: /
- port: 80
+ tcpSocket:
+ port: 443
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
httpGet:
path: /
- port: 80
+ port: 443
+ scheme: HTTPS
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
@@ -112,8 +125,11 @@ objects:
privileged: true
volumeMounts:
-
- name: "cfme-app-volume"
+ name: "${NAME}-server"
mountPath: "/persistent"
+ -
+ name: "${NAME}-region"
+ mountPath: "/persistent-region"
env:
-
name: "APPLICATION_INIT_DELAY"
@@ -144,29 +160,32 @@ objects:
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
requests:
- memory: "${MEMORY_APPLICATION_MIN}"
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
lifecycle:
preStop:
exec:
command:
- /opt/rh/cfme-container-scripts/sync-pv-data
- replicas: 1
- selector:
- name: ${NAME}
- triggers:
- - type: "ConfigChange"
- - type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "cloudforms"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
- strategy:
- type: "Recreate"
- recreateParams:
- timeoutSeconds: 1200
+ volumes:
+ -
+ name: "${NAME}-region"
+ persistentVolumeClaim:
+ claimName: ${NAME}-region
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ # Uncomment this if using dynamic volume provisioning.
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
+ # volume.alpha.kubernetes.io/storage-class: anything
+ spec:
+ accessModes: [ ReadWriteOnce ]
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -182,14 +201,6 @@ objects:
selector:
name: "${MEMCACHED_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-memcached
- annotations:
- description: "Keeps track of changes in the CloudForms memcached image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
@@ -223,7 +234,7 @@ objects:
containers:
-
name: "memcached"
- image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
ports:
-
containerPort: 11211
@@ -249,8 +260,11 @@ objects:
name: "MEMCACHED_SLAB_PAGE_SIZE"
value: "${MEMCACHED_SLAB_PAGE_SIZE}"
resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
limits:
- memory: "${MEMORY_MEMCACHED_LIMIT}"
+ memory: "${MEMCACHED_MEM_LIMIT}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -266,14 +280,6 @@ objects:
selector:
name: "${DATABASE_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-postgresql
- annotations:
- description: "Keeps track of changes in the CloudForms postgresql image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${DATABASE_SERVICE_NAME}"
@@ -307,11 +313,11 @@ objects:
-
name: "cfme-pgdb-volume"
persistentVolumeClaim:
- claimName: ${DATABASE_SERVICE_NAME}
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
containers:
-
name: "postgresql"
- image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
ports:
-
containerPort: 5432
@@ -350,8 +356,11 @@ objects:
name: "POSTGRESQL_SHARED_BUFFERS"
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
limits:
- memory: "${MEMORY_POSTGRESQL_LIMIT}"
+ memory: "${POSTGRESQL_MEM_LIMIT}"
parameters:
-
@@ -420,36 +429,87 @@ parameters:
name: "POSTGRESQL_SHARED_BUFFERS"
displayName: "PostgreSQL Shared Buffer Amount"
description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "64MB"
+ value: "256MB"
-
- name: "MEMORY_APPLICATION_MIN"
- displayName: "Application Memory Minimum"
+ name: "APPLICATION_CPU_REQ"
+ displayName: "Application Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
+ value: "1000m"
+ -
+ name: "POSTGRESQL_CPU_REQ"
+ displayName: "PostgreSQL Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
+ value: "500m"
+ -
+ name: "MEMCACHED_CPU_REQ"
+ displayName: "Memcached Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
+ value: "200m"
+ -
+ name: "APPLICATION_MEM_REQ"
+ displayName: "Application Min RAM Requested"
required: true
description: "Minimum amount of memory the Application container will need."
- value: "4096Mi"
+ value: "6144Mi"
+ -
+ name: "POSTGRESQL_MEM_REQ"
+ displayName: "PostgreSQL Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the PostgreSQL container will need."
+ value: "1024Mi"
-
- name: "MEMORY_POSTGRESQL_LIMIT"
- displayName: "PostgreSQL Memory Limit"
+ name: "MEMCACHED_MEM_REQ"
+ displayName: "Memcached Min RAM Requested"
required: true
- description: "Maximum amount of memory the PostgreSQL container can use."
- value: "2048Mi"
+ description: "Minimum amount of memory the Memcached container will need."
+ value: "64Mi"
-
- name: "MEMORY_MEMCACHED_LIMIT"
- displayName: "Memcached Memory Limit"
+ name: "APPLICATION_MEM_LIMIT"
+ displayName: "Application Max RAM Limit"
required: true
- description: "Maximum amount of memory the Memcached container can use."
+ description: "Maximum amount of memory the Application container can consume."
+ value: "16384Mi"
+ -
+ name: "POSTGRESQL_MEM_LIMIT"
+ displayName: "PostgreSQL Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can consume."
+ value: "8192Mi"
+ -
+ name: "MEMCACHED_MEM_LIMIT"
+ displayName: "Memcached Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can consume."
value: "256Mi"
-
+ name: "POSTGRESQL_IMG_NAME"
+ displayName: "PostgreSQL Image Name"
+ description: "This is the PostgreSQL image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql"
+ -
name: "POSTGRESQL_IMG_TAG"
displayName: "PostgreSQL Image Tag"
description: "This is the PostgreSQL image tag/version requested to deploy."
value: "latest"
-
+ name: "MEMCACHED_IMG_NAME"
+ displayName: "Memcached Image Name"
+ description: "This is the Memcached image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached"
+ -
name: "MEMCACHED_IMG_TAG"
displayName: "Memcached Image Tag"
description: "This is the Memcached image tag/version requested to deploy."
value: "latest"
-
+ name: "APPLICATION_IMG_NAME"
+ displayName: "Application Image Name"
+ description: "This is the Application image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app"
+ -
name: "APPLICATION_IMG_TAG"
displayName: "Application Image Tag"
description: "This is the Application image tag/version requested to deploy."
@@ -464,16 +524,22 @@ parameters:
displayName: "Application Init Delay"
required: true
description: "Delay in seconds before we attempt to initialize the application."
- value: "30"
+ value: "15"
-
name: "APPLICATION_VOLUME_CAPACITY"
displayName: "Application Volume Capacity"
required: true
description: "Volume space available for application data."
- value: "1Gi"
+ value: "5Gi"
+ -
+ name: "APPLICATION_REGION_VOLUME_CAPACITY"
+ displayName: "Application Region Volume Capacity"
+ required: true
+ description: "Volume space available for region application data."
+ value: "5Gi"
-
name: "DATABASE_VOLUME_CAPACITY"
displayName: "Database Volume Capacity"
required: true
description: "Volume space available for database."
- value: "1Gi"
+ value: "15Gi"
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
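
Both volumes above are hostPath volumes under /tmp, so they are only suitable for single-node evaluation. They also carry type labels (h-services, cassandra); the claims in the template that follows match by size alone, but a claim can pin itself to one of these volumes with a label selector, as in this hypothetical sketch:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cassandra-pvc
spec:
  accessModes:
  - ReadWriteOnce
  selector:
    matchLabels:
      type: cassandra              # pins the claim to cassandra-pv above
  resources:
    requests:
      storage: 2Gi
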
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..bbc0c7044
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: What docker image should be used for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: What docker image should be used for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: A public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: Hostname under which the Hawkular Services will be accessible. If left blank, a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username used for accessing the Hawkular Services. If left blank, a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password used for accessing the Hawkular Services. If left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the Cassandra node
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
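
HAWKULAR_USER and HAWKULAR_PASSWORD are produced by generate: expression from the '[a-zA-Z0-9]{16}' pattern when left blank, so the template message resolves to per-instantiation credentials. A hypothetical resolved message, with made-up values, would look like:

# Illustrative only; real values are generated randomly at instantiation time.
message: Credentials for hawkular-services are k3XbT9rQw2LmZp7V:Jd5Hn8Sc1Yf4Gq6W
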
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md
index a36d7ba7d..a36d7ba7d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
index f347f1f9f..536f7275e 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
index 6ed744777..3b7fdccce 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
index 97a8abf6d..ee274194f 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
index 0656219fb..e5ba43669 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
index d60b4647d..969e62ac5 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -36,7 +41,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
index c2bfa40fd..4f39d41a5 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
index 7a16e742a..c37102cb0 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
index 242212d6f..32dc93a95 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
index e9af50937..6bb683e52 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
@@ -24,7 +24,10 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-password" : "${REDIS_PASSWORD}"
@@ -35,7 +38,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
index aa27578a9..9e8be2309 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
@@ -24,7 +24,10 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-password" : "${REDIS_PASSWORD}"
@@ -35,7 +38,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
index 857ffa980..857ffa980 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index 1a90a9409..6cef21945 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/httpd-24-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
@@ -103,7 +148,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +182,22 @@
"kind": "DockerImage",
"name": "centos/nodejs-4-centos7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-6-centos7:latest"
+ }
}
]
}
@@ -407,7 +468,7 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "ImageStreamTag",
@@ -423,7 +484,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
"version": "8.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -439,7 +500,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
"version": "9.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -455,7 +516,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
"version": "10.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -471,7 +532,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
"version": "10.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -800,7 +861,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index eb94c3bb4..abdae01e3 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/httpd-24-rhel7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
@@ -103,7 +148,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +182,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest"
+ }
}
]
}
@@ -253,7 +314,7 @@
"tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
- "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"kind": "DockerImage",
@@ -707,7 +768,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
index f48d8d4a8..6d2ccbf7f 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
@@ -17,6 +17,7 @@ instantiating them.
* [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex).
* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
* [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. For more information see the [source repository](https://github.com/openshift/httpd-ex).
* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml
new file mode 100644
index 000000000..4e469f6e8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml
@@ -0,0 +1,1261 @@
+base_env: &base_env
+- name: RAILS_ENV
+ value: "production"
+- name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+- name: FORCE_SSL
+ value: "true"
+- name: THREESCALE_SUPERDOMAIN
+ value: "${WILDCARD_DOMAIN}"
+- name: TENANT_NAME
+ value: "${TENANT_NAME}"
+- name: APICAST_ACCESS_TOKEN
+ value: "${APICAST_ACCESS_TOKEN}"
+- name: ADMIN_ACCESS_TOKEN
+ value: "${ADMIN_ACCESS_TOKEN}"
+- name: PROVIDER_PLAN
+ value: 'enterprise'
+- name: USER_LOGIN
+ value: "${ADMIN_USERNAME}"
+- name: USER_PASSWORD
+ value: "${ADMIN_PASSWORD}"
+- name: RAILS_LOG_TO_STDOUT
+ value: "true"
+- name: RAILS_LOG_LEVEL
+ value: "info"
+- name: THINKING_SPHINX_ADDRESS
+ value: "system-sphinx"
+- name: THINKING_SPHINX_PORT
+ value: "9306"
+- name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "/tmp/sphinx.conf"
+- name: EVENTS_SHARED_SECRET
+ value: "${SYSTEM_BACKEND_SHARED_SECRET}"
+- name: THREESCALE_SANDBOX_PROXY_OPENSSL_VERIFY_MODE
+ value: "VERIFY_NONE"
+- name: APICAST_BACKEND_ROOT_ENDPOINT
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+- name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+- name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+- name: SECRET_KEY_BASE
+ value: "${SYSTEM_APP_SECRET_KEY_BASE}"
+- name: AMP_RELEASE
+ value: "${AMP_RELEASE}"
+- name: SMTP_ADDRESS
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: address
+- name: SMTP_USER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: username
+- name: SMTP_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: password
+- name: SMTP_DOMAIN
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: domain
+- name: SMTP_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: port
+- name: SMTP_AUTHENTICATION
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: authentication
+- name: SMTP_OPENSSL_VERIFY_MODE
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: openssl.verify.mode
+- name: BACKEND_ROUTE
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: "system"
+message: "Login on https://${TENANT_NAME}-admin.${WILDCARD_DOMAIN} as ${ADMIN_USERNAME}/${ADMIN_PASSWORD}"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-cron
+ spec:
+ replicas: 1
+ selector:
+ name: backend-cron
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-cron
+ spec:
+ containers:
+ - args:
+ - backend-cron
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-cron
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-redis
+ spec:
+ replicas: 1
+ selector:
+ name: backend-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: backend-redis
+ spec:
+ containers:
+ - image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: backend-redis
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ volumeMounts:
+ - name: backend-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ volumes:
+ - name: backend-redis-storage
+ persistentVolumeClaim:
+ claimName: backend-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-listener
+ spec:
+ replicas: 1
+ selector:
+ name: backend-listener
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-listener
+ spec:
+ containers:
+ - args:
+ - 3scale_backend
+ - start
+ - "-e"
+ - production
+ - "-p"
+ - '3000'
+ - "-x"
+ - "/dev/stdout"
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+ - name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-listener
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3000
+ readinessProbe:
+ httpGet:
+ path: "/status"
+ port: 3000
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ selector:
+ name: backend-redis
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-listener
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: 3000
+ name: http
+ selector:
+ name: backend-listener
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-provider
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: provider
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-developer
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: developer
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-worker
+ spec:
+ replicas: 1
+ selector:
+ name: backend-worker
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-worker
+ spec:
+ containers:
+ - args:
+ - 3scale_backend_worker
+ - run
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_EVENTS_HOOK
+ value: http://system-provider:3000/master/events/import
+ - name: CONFIG_EVENTS_HOOK_SHARED_SECRET
+ value: ${SYSTEM_BACKEND_SHARED_SECRET}
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-worker
+ triggers:
+ - type: ConfigChange
+
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ ports:
+ - name: system-mysql
+ protocol: TCP
+ port: 3306
+ targetPort: 3306
+ nodePort: 0
+ selector:
+ name: 'system-mysql'
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ name: redis
+ selector:
+ name: system-redis
+
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-redis
+ spec:
+ replicas: 1
+ selector:
+ name: system-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: system-redis
+ spec:
+ containers:
+ - args:
+ image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: system-redis
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - name: system-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ volumes:
+ - name: system-redis-storage
+ persistentVolumeClaim:
+ claimName: system-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-sphinx
+ spec:
+ ports:
+ - port: 9306
+ protocol: TCP
+ targetPort: 9306
+ name: sphinx
+ selector:
+ name: system-sphinx
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sphinx
+ spec:
+ replicas: 1
+ selector:
+ name: system-sphinx
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sphinx
+ spec:
+ volumes:
+ - name: system-sphinx-database
+ emptyDir: {}
+ containers:
+ - args:
+ - rake
+ - 'openshift:thinking_sphinx:start'
+ volumeMounts:
+ - name: system-sphinx-database
+ mountPath: "/opt/system/db/sphinx"
+ env:
+ - name: RAILS_ENV
+ value: production
+ - name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+ - name: THINKING_SPHINX_ADDRESS
+ value: 0.0.0.0
+ - name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "db/sphinx/production.conf"
+ - name: THINKING_SPHINX_PID_FILE
+ value: db/sphinx/searchd.pid
+ - name: DELTA_INDEX_INTERVAL
+ value: '5'
+ - name: FULL_REINDEX_INTERVAL
+ value: '60'
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sphinx
+ livenessProbe:
+ tcpSocket:
+ port: 9306
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-memcache
+ spec:
+ ports:
+ - port: 11211
+ protocol: TCP
+ targetPort: 11211
+ name: memcache
+ selector:
+ name: system-memcache
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-memcache
+ spec:
+ replicas: 1
+ selector:
+ name: system-memcache
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-memcache
+ spec:
+ containers:
+ - args:
+ env:
+ image: 3scale-amp20/memcached:1.4.15-7
+ imagePullPolicy: IfNotPresent
+ name: memcache
+ readinessProbe:
+ exec:
+ command:
+ - "sh"
+ - "-c"
+ - "echo version | nc $HOSTNAME 11211 | grep VERSION"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 11211
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ command:
+ - "memcached"
+ - "-m"
+ - "64"
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-provider-admin-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}-admin.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-provider
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: backend-route
+ labels:
+ app: system-route
+ spec:
+ host: backend-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: backend-listener
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-developer-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-developer
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-staging
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-staging
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-staging
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: http://${APICAST_ACCESS_TOKEN}@system-provider:3000
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "sandbox"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/2"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-staging
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-staging
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-staging
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-production
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-production
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-production
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: "http://${APICAST_ACCESS_TOKEN}@system-provider:3000"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "boot"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "300"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "production"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/1"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-production
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-production
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-production
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-staging-route
+ labels:
+ app: apicast-staging
+ spec:
+ host: api-${TENANT_NAME}-apicast-staging.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-staging
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-production-route
+ labels:
+ app: apicast-production
+ spec:
+ host: api-${TENANT_NAME}-apicast-production.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-production
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-app
+ spec:
+ replicas: 1
+ selector:
+ name: system-app
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ pre:
+ failurePolicy: Retry
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:deploy
+ env: *base_env
+ volumes:
+ - system-storage
+ post:
+ failurePolicy: Abort
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:post_deploy
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-app
+ spec:
+ containers:
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ command: ['env', 'TENANT_MODE=provider', 'PORT=3000', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ name: system-provider
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: provider
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: provider
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ name: provider
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ command: ['env', 'TENANT_MODE=developer', 'PORT=3001', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ imagePullPolicy: IfNotPresent
+ name: system-developer
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: developer
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: developer
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3001
+ protocol: TCP
+ name: developer
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ readOnly: true
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-resque
+ spec:
+ replicas: 1
+ selector:
+ name: system-resque
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-resque
+ spec:
+ containers:
+ - args:
+ - 'rake'
+ - 'resque:work'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-resque
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ - 'rake'
+ - 'resque:scheduler'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-scheduler
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sidekiq
+ spec:
+ replicas: 1
+ selector:
+ name: system-sidekiq
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sidekiq
+ spec:
+ containers:
+ - args:
+ - rake
+ - sidekiq:worker
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sidekiq
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: 'system-mysql'
+ template:
+ metadata:
+ labels:
+ name: 'system-mysql'
+ spec:
+ containers:
+ - name: system-mysql
+ image: ${MYSQL_IMAGE}
+ ports:
+ - containerPort: 3306
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ readinessProbe:
+ timeoutSeconds: 5
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ exec:
+ command:
+ - /bin/sh
+ - '-i'
+ - '-c'
+ - MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3306
+ env:
+ - name: MYSQL_USER
+ value: ${MYSQL_USER}
+ - name: MYSQL_PASSWORD
+ value: ${MYSQL_PASSWORD}
+ - name: MYSQL_DATABASE
+ value: ${MYSQL_DATABASE}
+ - name: MYSQL_ROOT_PASSWORD
+ value: ${MYSQL_ROOT_PASSWORD}
+ - name: MYSQL_LOWER_CASE_TABLE_NAMES
+ value: "1"
+ volumeMounts:
+ - name: 'mysql-storage'
+ mountPath: /var/lib/mysql/data
+ imagePullPolicy: IfNotPresent
+ volumes:
+ - name: 'mysql-storage'
+ persistentVolumeClaim:
+ claimName: 'mysql-storage'
+- kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: redis-config
+ data:
+ redis.conf: |
+ protected-mode no
+
+ port 6379
+
+ timeout 0
+ tcp-keepalive 300
+
+ daemonize no
+ supervised no
+
+ loglevel notice
+
+ databases 16
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ stop-writes-on-bgsave-error yes
+
+ rdbcompression yes
+ rdbchecksum yes
+
+ dbfilename dump.rdb
+
+ slave-serve-stale-data yes
+ slave-read-only yes
+
+ repl-diskless-sync no
+ repl-disable-tcp-nodelay no
+
+ appendonly yes
+ appendfilename "appendonly.aof"
+ appendfsync everysec
+ no-appendfsync-on-rewrite no
+ auto-aof-rewrite-percentage 100
+ auto-aof-rewrite-min-size 64mb
+ aof-load-truncated yes
+
+ lua-time-limit 5000
+
+ activerehashing no
+
+ aof-rewrite-incremental-fsync yes
+ dir /var/lib/redis/data
+
+- kind: ConfigMap
+
+ apiVersion: v1
+ metadata:
+ name: smtp
+ data:
+ address: ""
+ username: ""
+ password: ""
+ domain: ""
+ port: ""
+ authentication: ""
+ openssl.verify.mode: ""
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: ADMIN_PASSWORD
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+- name: ADMIN_USERNAME
+ value: admin
+ required: true
+- name: APICAST_ACCESS_TOKEN
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+ description: "Read Only Access Token that is APIcast going to use to download its configuration."
+- name: ADMIN_ACCESS_TOKEN
+ required: false
+ generate: expression
+ from: "[a-z0-9]{16}"
+ description: "Admin Access Token with all scopes and write permissions for API access."
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. Eg. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Tenant name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
+- name: MYSQL_USER
+ displayName: MySQL User
+ description: Username for MySQL user that will be used for accessing the database.
+ value: "mysql"
+ required: true
+- name: MYSQL_PASSWORD
+ displayName: MySQL Password
+ description: Password for the MySQL user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: MYSQL_DATABASE
+ displayName: MySQL Database Name
+ description: Name of the MySQL database accessed.
+ value: "system"
+ required: true
+- name: MYSQL_ROOT_PASSWORD
+ displayName: MySQL Root password.
+ description: Password for Root user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_BACKEND_USERNAME
+ description: Internal 3scale API username for internal 3scale api auth.
+ value: "3scale_api_user"
+ required: true
+- name: SYSTEM_BACKEND_PASSWORD
+ description: Internal 3scale API password for internal 3scale api auth.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: REDIS_IMAGE
+ description: Redis image to use
+ required: true
+ value: rhscl/redis-32-rhel7:3.2-5.7
+- name: MYSQL_IMAGE
+ description: Mysql image to use
+ required: true
+ value: rhscl/mysql-56-rhel7:5.6-13.14
+- name: SYSTEM_BACKEND_SHARED_SECRET
+ description: Shared secret to import events from backend to system.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_APP_SECRET_KEY_BASE
+ description: System application secret key base
+ generate: expression
+ from: "[a-f0-9]{128}"
+ required: true
+- name: APICAST_MANAGEMENT_API
+ description: "Scope of the APIcast Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: APICAST_OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification when downloading the configuration. Can be set to true/false."
+ required: false
+ value: "false"
+- name: APICAST_RESPONSE_CODES
+ description: "Enable logging response codes in APIcast."
+ value: "true"
+ required: false
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml
new file mode 100644
index 000000000..8e8051c0b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: "${APICAST_NAME}"
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: "${CONFIGURATION_URL_SECRET}"
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: "${CONFIGURATION_FILE_PATH}"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "${DEPLOYMENT_ENVIRONMENT}"
+ - name: RESOLVER
+ value: "${RESOLVER}"
+ - name: APICAST_SERVICES
+ value: "${SERVICES_LIST}"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "${CONFIGURATION_LOADER}"
+ - name: APICAST_LOG_LEVEL
+ value: "${LOG_LEVEL}"
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: "${PATH_ROUTING}"
+ - name: APICAST_RESPONSE_CODES
+ value: "${RESPONSE_CODES}"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "${CONFIGURATION_CACHE}"
+ - name: REDIS_URL
+ value: "${REDIS_URL}"
+ - name: APICAST_MANAGEMENT_API
+ value: "${MANAGEMENT_API}"
+ - name: OPENSSL_VERIFY
+ value: "${OPENSSL_VERIFY}"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: "${APICAST_NAME}"
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: management
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - name: proxy
+ containerPort: 8080
+ protocol: TCP
+ - name: management
+ containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ ports:
+ - name: proxy
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: apicast-configuration-url-secret
+ name: CONFIGURATION_URL_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: CONFIGURATION_FILE_PATH
+ required: false
+- description: "Deployment environment. Can be sandbox or production."
+ value: production
+ name: DEPLOYMENT_ENVIRONMENT
+ required: true
+- description: "Name for the 3scale API Gateway"
+ value: apicast
+ name: APICAST_NAME
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: SERVICES_LIST
+ required: false
+- name: CONFIGURATION_LOADER
+ description: "When to load configuration. If on gateway start or incoming request. Allowed values are: lazy, boot."
+ value: boot
+ required: false
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- name: CONFIGURATION_CACHE
+ description: "For how long to cache the downloaded configuration in seconds. Can be left empty, 0 or greater than 60."
+ value: ""
+ required: false
+- description: "Redis URL. Required for OAuth2 integration. ex: redis://PASSWORD@127.0.0.1:6379/0"
+ name: REDIS_URL
+ required: false
+- name: MANAGEMENT_API
+ description: "Scope of the Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification. Can be set to true/false."
+ required: true
+ value: "false"
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
index eb3d296be..6d987ee33 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -510,7 +513,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
index da2454d2e..fb2ef206e 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -484,7 +487,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..7ffb25e14 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -147,6 +150,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
@@ -472,7 +478,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index 7a285dba8..d787e376b 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -147,6 +150,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
@@ -446,7 +452,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..a2070207b 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -148,7 +151,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -479,7 +482,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index 7bee85ddd..0d33c6e0e 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -148,7 +151,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -453,7 +456,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
new file mode 100644
index 000000000..af46579c8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
@@ -0,0 +1,333 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-example",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Example",
+ "description": "An example .NET Core application.",
+ "tags": "quickstart,dotnet,.net",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_PUBLISH",
+ "value": "true"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-app"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-app",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 15
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dotnet-example"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "DOTNET_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the code.",
+ "value": "dotnet:1.0"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "dotnetcore-1.0"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to use a subdirectory of the source code repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service. If left blank, a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "app"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish.",
+ "value": "bower gulp"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ }
+ ]
+}
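
The dotnet-example template above is consumed through the standard oc process and oc create flow. The following is a minimal Ansible task sketch; the example file path, the target project name, and the parameter values are illustrative assumptions rather than values fixed by the template, and any parameter that already carries a default (NAME, SOURCE_REPOSITORY_URL) can be omitted.

    # Task sketch: instantiate the .NET Core quickstart template.
    # The file path, project name, and parameter values are assumptions for illustration.
    - name: Instantiate the dotnet-example quickstart
      shell: >
        oc process
        -f /usr/share/openshift/examples/quickstart-templates/dotnet-example.json
        -p NAME=dotnet-example
        -p SOURCE_REPOSITORY_URL=https://github.com/redhat-developer/s2i-dotnetcore-ex.git
        | oc create -n myproject -f -

The webhook secrets are filled in automatically at processing time because their parameters use generate: expression.
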
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
new file mode 100644
index 000000000..a2b59c2d3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
@@ -0,0 +1,565 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-pgsql-persistent",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core + PostgreSQL (Persistent)",
+ "description": "An example .NET Core application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.",
+ "tags": "quickstart,dotnet",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.",
+ "labels": {
+ "template": "dotnet-pgsql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_PUBLISH",
+ "value": "true"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-pgsql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-pgsql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "ConnectionString",
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 10
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "musicstore"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the .NET Core container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "DOTNET_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the code.",
+ "value": "dotnet:1.1"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the .NET builder ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/redhat-developer/s2i-aspnet-musicstore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "rel/1.1-example"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "samples/MusicStore"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish."
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service. If left blank, a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "musicstore"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ }
+ ]
+}
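
Because the Secret in this template is populated from generate-expression parameters, the effective connection string is only known after processing. A hedged task sketch for reading it back follows; the object name musicstore matches the template's NAME default, while the project name is an assumption.

    # Task sketch: read the generated connection string back out of the Secret.
    # Assumes the template was instantiated with its default NAME=musicstore in project "myproject".
    - name: Fetch the generated connect-string
      shell: >
        oc get secret musicstore -n myproject
        -o jsonpath='{.data.connect-string}' | base64 -d
      register: connect_string
      changed_when: false
      no_log: true  # the decoded value embeds the database password
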
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
new file mode 100644
index 000000000..ac671cc06
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
@@ -0,0 +1,268 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "httpd-example",
+ "annotations": {
+ "openshift.io/display-name": "Httpd",
+ "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "tags": "quickstart,httpd",
+ "iconClass": "icon-apache",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "labels": {
+ "template": "httpd-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "httpd:2.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "httpd-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "httpd-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "httpd-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/httpd-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the httpd service. If left blank, a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "GitHub trigger secret. A difficult-to-guess string encoded as part of the webhook URL. Not encrypted.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ]
+}
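
Builds of the httpd example are normally driven by the ImageChange, ConfigChange, and webhook triggers declared above, but a build can also be started by hand once the objects exist. A task sketch, assuming the default NAME of httpd-example and a project called myproject:

    # Task sketch: start a manual build of the httpd example and stream its logs.
    # NAME=httpd-example (the template default) and the project name are assumptions.
    - name: Start a build of the httpd example
      command: oc start-build httpd-example -n myproject --follow
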
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
index 264e4b2de..ce96684a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
index b47bdf353..34b2b920b 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..a9c365361 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
@@ -154,7 +157,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -491,7 +494,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "GitHub trigger secret. A difficult-to-guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..53a6147d5 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
@@ -154,7 +157,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
@@ -467,7 +470,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "GitHub trigger secret. A difficult-to-guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml
new file mode 100644
index 000000000..0bbb8e625
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-pvc"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
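
The amp-pvc template above takes no parameters, so processing it needs no -p flags; whether the claims bind depends on the cluster offering suitable storage, including a ReadWriteMany-capable volume for system-storage. A hedged task sketch, with the file path and project name as assumptions:

    # Task sketch: create the AMP claims and wait for one of them to bind.
    # The file path and project name are assumptions; binding requires matching PVs
    # or a default StorageClass (ReadWriteMany for system-storage).
    - name: Create the AMP persistent volume claims
      shell: >
        oc process -f /usr/share/openshift/examples/quickstart-templates/pvc.yml
        | oc create -n myproject -f -

    - name: Wait for the system-storage claim to bind
      command: oc get pvc system-storage -n myproject -o jsonpath={.status.phase}
      register: pvc_phase
      until: pvc_phase.stdout == 'Bound'
      retries: 30
      delay: 10
      changed_when: false
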
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
index b400cfdb3..f07a43071 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -526,7 +533,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "GitHub trigger secret. A difficult-to-guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
index fa67412ff..a7992c988 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -500,7 +507,7 @@
{
"name": "GITHUB_WEBHOOK_SECRET",
"displayName": "GitHub Webhook Secret",
- "description": "A secret string used to configure the GitHub webhook.",
+ "description": "GitHub trigger secret. A difficult-to-guess string encoded as part of the webhook URL. Not encrypted.",
"generate": "expression",
"from": "[a-zA-Z0-9]{40}"
},
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml
new file mode 100644
index 000000000..00dedecd5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml
@@ -0,0 +1,158 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-apicast-wildcard-router"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-router
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-router
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-router
+ spec:
+ volumes:
+ - name: apicast-router-config
+ configMap:
+ name: apicast-router-config
+ items:
+ - key: router.conf
+ path: router.conf
+ containers:
+ - env:
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-router
+ command: ['bin/apicast']
+ livenessProbe:
+ tcpSocket:
+ port: router
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 30
+ volumeMounts:
+ - name: apicast-router-config
+ mountPath: /opt/app-root/src/sites.d/
+ readOnly: true
+ ports:
+ - containerPort: 8082
+ name: router
+ protocol: TCP
+ - containerPort: 8090
+ name: management
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-router
+ spec:
+ ports:
+ - name: router
+ port: 80
+ protocol: TCP
+ targetPort: router
+ selector:
+ deploymentconfig: apicast-router
+
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: apicast-router-config
+ data:
+ router.conf: |-
+ upstream wildcard {
+ server 0.0.0.1:1;
+
+ balancer_by_lua_block {
+ local round_robin = require 'resty.balancer.round_robin'
+ local balancer = round_robin.new()
+ local peers = balancer:peers(ngx.ctx.apicast)
+
+ local peer, err = balancer:set_peer(peers)
+
+ if not peer then
+ ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
+ ngx.log(ngx.ERR, "failed to set current backend peer: ", err)
+ ngx.exit(ngx.status)
+ end
+ }
+
+ keepalive 1024;
+ }
+
+ server {
+ listen 8082;
+ server_name ~-(?<apicast>apicast-(staging|production))\.;
+ access_log /dev/stdout combined;
+
+ location / {
+ access_by_lua_block {
+ local resolver = require('resty.resolver'):instance()
+ local servers = resolver:get_servers(ngx.var.apicast, { port = 8080 })
+
+ if #servers == 0 then
+ ngx.status = ngx.HTTP_BAD_GATEWAY
+ ngx.exit(ngx.HTTP_OK)
+ end
+
+ ngx.ctx.apicast = servers
+ }
+ proxy_http_version 1.1;
+ proxy_pass $scheme://wildcard;
+ proxy_set_header Host $host;
+ proxy_set_header Connection "";
+ }
+ }
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: apicast-wildcard-router
+ labels:
+ app: apicast-wildcard-router
+ spec:
+ host: apicast-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-router
+ port:
+ targetPort: router
+ wildcardPolicy: Subdomain
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. Eg. example.com will generate 3scale-admin.example.com.
+ description: Root domain for the wildcard routes, e.g. example.com will generate 3scale-admin.example.com.
+- name: TENANT_NAME
+ description: "Domain name under the root that Admin UI will be available with -admin suffix."
+ description: "Domain name under the root domain where the Admin UI will be available with the -admin suffix."
+ value: "3scale"
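
WILDCARD_DOMAIN is the only parameter above without a default, so it must be supplied when the template is processed; the resulting Route uses wildcardPolicy: Subdomain, which generally also requires the cluster router to permit wildcard routes. A task sketch with an assumed file path, project, and domain:

    # Task sketch: instantiate the apicast wildcard router template.
    # The file path, project name, and WILDCARD_DOMAIN value are assumptions.
    - name: Create the APIcast wildcard router
      shell: >
        oc process -f /usr/share/openshift/examples/quickstart-templates/wildcard.yml
        -p WILDCARD_DOMAIN=example.com
        | oc create -n myproject -f -
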
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json
index 9d99973be..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
new file mode 100644
index 000000000..0bb56452b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
@@ -0,0 +1,822 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for JBoss Middleware products."
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat7-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports": "tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports": "tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat8-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports": "tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports": "tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver31-tomcat7-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Web Server 3.1 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports": "tomcat7:3.1,tomcat:7,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver31-tomcat8-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Web Server 3.1 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports": "tomcat8:3.1,tomcat:8,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap64-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports": "eap:6.4,javaee:6,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.5",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.5"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap70-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports": "eap:7.0,javaee:7,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports": "eap:7.0,javaee:7,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.5",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.5"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver62-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift",
+ "tags": [
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.2 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,xpaas",
+ "supports": "decisionserver:6.2,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,xpaas",
+ "supports": "decisionserver:6.3,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.3,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.4 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.4,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-processserver63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,xpaas",
+ "supports": "processserver:6.3,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,java,xpaas",
+ "supports":"processserver:6.3,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-processserver64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.4 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,java,xpaas",
+ "supports":"processserver:6.4,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift",
+ "tags": [
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:6.5,xpaas:1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:6.5,xpaas:1.4",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports":"datagrid:6.5,xpaas:1.4",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-client-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 Client Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datavirt63-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports": "datavirt:6.3,xpaas:1.4",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports": "datavirt:6.3,xpaas:1.4",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,jboss,xpaas",
+ "supports":"datavirt:6.3,xpaas:1.4",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datavirt63-driver-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 JDBC Driver Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Virtualization 6.3 JDBC Driver Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 JDBC Driver Modules for EAP"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-62",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.2,messaging,xpaas:1.2",
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.4",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-63",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss A-MQ 6.3 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.3,messaging,xpaas:1.0",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-sso70-openshift",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso70-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.0,xpaas:1.3",
+ "version": "1.3",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.0,xpaas:1.4",
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-sso71-openshift",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso71-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.1,xpaas:1.4",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "Red Hat SSO 7.1",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports": "sso:7.1,xpaas:1.4",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-openjdk18-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports": "java:8,xpaas:1.0",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.0"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports": "java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.1"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
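
Image stream lists like the one above are typically loaded once into the shared openshift namespace so that every project can reference the tags; a re-run should tolerate streams that already exist. A hedged task sketch (the file path is an assumption):

    # Task sketch: load the JBoss Middleware image streams into the shared namespace.
    # The file path is an assumption; "already exists" errors on re-runs are tolerated.
    - name: Create the JBoss Middleware image streams
      command: >
        oc create -n openshift
        -f /usr/share/openshift/examples/xpaas-streams/jboss-image-streams.json
      register: jboss_streams
      failed_when: jboss_streams.rc != 0 and 'already exists' not in jboss_streams.stderr
      changed_when: jboss_streams.rc == 0
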
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json
index ab35afead..af20b373a 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json
@@ -6,46 +6,54 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral, no SSL)"
},
"name": "amq62-basic"
},
"labels": {
"template": "amq62-basic",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -53,6 +61,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,18 +69,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -171,7 +183,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire port."
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
}
}
},
@@ -202,7 +215,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
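
Note: the added displayName and message fields only change how the template is presented in the web console; instantiation itself is unchanged. A minimal usage sketch, assuming the template is already loaded in the openshift namespace and using hypothetical parameter values:

    # Create an ephemeral A-MQ 6.2 broker, overriding a few parameters
    oc new-app --template=amq62-basic \
        -p APPLICATION_NAME=broker \
        -p MQ_PROTOCOL=openwire,amqp \
        -p MQ_QUEUES=orders,invoices
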
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json
index c12f06dec..5acdbfabf 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json
@@ -6,58 +6,68 @@
"description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent with SSL)"
},
"name": "amq62-persistent-ssl"
},
"labels": {
"template": "amq62-persistent-ssl",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -65,6 +75,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -72,48 +83,56 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
"required": true
},
{
+ "displayName": "Trust Store Filename",
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
"value": "broker.ts",
"required": true
},
{
+ "displayName": "Trust Store Password",
"description": "SSL trust store password",
"name": "AMQ_TRUSTSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Keystore Filename",
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
"value": "broker.ks",
"required": true
},
{
+ "displayName": "A-MQ Keystore Password",
"description": "Password for accessing SSL keystore",
"name": "AMQ_KEYSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -309,7 +328,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire (SSL) port."
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
}
}
},
@@ -340,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
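
Note: as the template's message points out, the SSL variants expect an "amq-service-account" service account and a secret (referenced by AMQ_SECRET) holding the keystore and truststore before the broker pod can start. A minimal preparation sketch, assuming self-signed keys are acceptable and using "changeit" as the store password (both assumptions for illustration only):

    # Generate a key pair and build a matching trust store
    keytool -genkey -alias broker -keyalg RSA -dname "CN=broker" \
        -keystore broker.ks -storepass changeit -keypass changeit
    keytool -export -alias broker -keystore broker.ks -storepass changeit -file broker.cer
    keytool -import -alias broker -keystore broker.ts -storepass changeit \
        -file broker.cer -noprompt
    # Create the service account and the secret the template references
    oc create serviceaccount amq-service-account
    oc create secret generic amq-app-secret --from-file=broker.ks --from-file=broker.ts
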
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json
index 897ce0395..b8089cd6d 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json
@@ -6,58 +6,68 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent, no SSL)"
},
"name": "amq62-persistent"
},
"labels": {
"template": "amq62-persistent",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -65,6 +75,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -72,18 +83,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -183,7 +197,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire port."
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
}
}
},
@@ -214,7 +229,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
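
Note: for the persistent variants, VOLUME_CAPACITY sizes the PersistentVolumeClaim the template creates for message storage. A minimal sketch with an illustrative override:

    # Instantiate with a larger message store (value chosen for illustration)
    oc new-app --template=amq62-persistent -p APPLICATION_NAME=broker -p VOLUME_CAPACITY=1Gi
    # Confirm the template's persistent volume claim is bound
    oc get pvc -l application=broker
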
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json
index 97d110286..b52fdbfb0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json
@@ -6,46 +6,54 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.3.1"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral with SSL)"
},
"name": "amq62-ssl"
},
"labels": {
"template": "amq62-ssl",
- "xpaas": "1.3.1"
+ "xpaas": "1.4.0"
},
+ "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "broker",
"required": true
},
{
+ "displayName": "A-MQ Protocols",
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -53,6 +61,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,48 +69,56 @@
"required": false
},
{
+ "displayName": "Secret Name",
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
"required": true
},
{
+ "displayName": "Trust Store Filename",
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
"value": "broker.ts",
"required": true
},
{
+ "displayName": "Trust Store Password",
"description": "SSL trust store password",
"name": "AMQ_TRUSTSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Keystore Filename",
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
"value": "broker.ks",
"required": true
},
{
+ "displayName": "A-MQ Keystore Password",
"description": "Password for accessing SSL keystore",
"name": "AMQ_KEYSTORE_PASSWORD",
"value": "",
"required": true
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -297,7 +314,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's OpenWire (SSL) port."
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
}
}
},
@@ -328,7 +346,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
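
Note: each of the four amq62 template updates above also retargets the deployment's ImageChange trigger from the jboss-amq-62:1.3 tag to 1.4, so new instantiations pull the 1.4 image. A quick check that the referenced tag actually exists (sketch):

    # Confirm the 1.4 tag is present in the shared namespace
    oc get imagestreamtag jboss-amq-62:1.4 -n openshift
    # Re-import all tags for the stream if it is missing
    oc import-image jboss-amq-62 -n openshift --all
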
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-basic.json
new file mode 100644
index 000000000..d29f6a300
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-basic.json
@@ -0,0 +1,334 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral, no SSL)"
+ },
+ "name": "amq63-basic"
+ },
+ "labels": {
+ "template": "amq63-basic",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
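
Note: the new amq63-basic template can also be processed offline instead of being instantiated through the catalog, which is useful for inspecting the generated objects. A minimal sketch, assuming the repository is checked out locally and using hypothetical parameter values:

    # Render the template and create the resulting objects
    oc process -f roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-basic.json \
        -p APPLICATION_NAME=mybroker -p MQ_QUEUES=test.queue \
        | oc create -f -
    # Follow the resulting deployment
    oc logs -f dc/mybroker-amq-amq 2>/dev/null || oc logs -f dc/mybroker-amq
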
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent-ssl.json
new file mode 100644
index 000000000..47f6396dd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent-ssl.json
@@ -0,0 +1,569 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent with SSL)"
+ },
+ "name": "amq63-persistent-ssl"
+ },
+ "labels": {
+ "template": "amq63-persistent-ssl",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Filename",
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Password",
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Filename",
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Password",
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ },
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
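
Note: because the default AMQ_MESH_DISCOVERY_TYPE is "kube", the broker queries the Kubernetes API for its mesh peers, which requires the 'view' role described in the parameter help. In this SSL template the pod runs under the amq-service-account service account, so that is the account to grant. A minimal sketch, using a hypothetical project name "amq-demo":

    # Allow the broker's service account to resolve service endpoints
    oc policy add-role-to-user view system:serviceaccount:amq-demo:amq-service-account -n amq-demo
    # Alternatively, switch to DNS-based discovery at instantiation time
    oc new-app --template=amq63-persistent-ssl -p AMQ_MESH_DISCOVERY_TYPE=dns
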
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent.json
new file mode 100644
index 000000000..4b64203c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-persistent.json
@@ -0,0 +1,386 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent, no SSL)"
+ },
+ "name": "amq63-persistent"
+ },
+ "labels": {
+ "template": "amq63-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
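
Note: after instantiating one of these templates, the readiness probe (/opt/amq/bin/readinessProbe.sh) gates when the broker pod is reported Ready, so rollout status is the simplest health check. A brief sketch using the default APPLICATION_NAME of "broker":

    # Wait for the deployment to finish and the readiness probe to pass
    oc rollout status dc/broker-amq
    # Inspect the pods created for this application
    oc get pods -l application=broker
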
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-ssl.json
new file mode 100644
index 000000000..20ad50016
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq63-ssl.json
@@ -0,0 +1,521 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral with SSL)"
+ },
+ "name": "amq63-ssl"
+ },
+ "labels": {
+ "template": "amq63-ssl",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Filename",
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "displayName": "Trust Store Password",
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Filename",
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Keystore Password",
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-amqp-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-mqtt-ssl\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-stomp-ssl\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
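Note: the amq63-ssl template above only deploys cleanly once the "amq-service-account" service account and a secret carrying the broker trust store and key store exist in the project, as its message field states. A minimal sketch of that setup, assuming the broker.ts/broker.ks files were generated beforehand and using a hypothetical project name:

    # Service account referenced by the pod's serviceAccountName
    oc create serviceaccount amq-service-account
    # Needed when AMQ_MESH_DISCOVERY_TYPE=kube; the template text grants 'view' to the default
    # account, so granting it to amq-service-account (the account the pod runs as) is an assumption
    oc policy add-role-to-user view system:serviceaccount:myproject:amq-service-account
    # Bundle the existing keystore files under the default secret name (amq-app-secret)
    oc create secret generic amq-app-secret --from-file=broker.ts --from-file=broker.ks
    # Instantiate the template; passwords here are placeholders
    oc new-app --template=amq63-ssl -p AMQ_TRUSTSTORE_PASSWORD=changeit -p AMQ_KEYSTORE_PASSWORD=changeit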
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json
index 56e76016f..32433bef0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json
@@ -6,76 +6,103 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 (Ephemeral, no https)"
},
"name": "datagrid65-basic"
},
"labels": {
"template": "datagrid65-basic",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -200,7 +227,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -267,9 +294,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -302,6 +334,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
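The datagrid65-basic knobs introduced here (INFINISPAN_CONNECTORS, CACHE_NAMES, DATAVIRT_CACHE_NAMES, CACHE_TYPE_DEFAULT) are all template parameters, so they are supplied at instantiation time. A hedged sketch, with illustrative cache names:

    # Values below are examples only; the template must be visible from the current
    # project or be referenced in the openshift namespace
    oc process datagrid65-basic \
        -p APPLICATION_NAME=datagrid-app \
        -p INFINISPAN_CONNECTORS=hotrod,rest \
        -p CACHE_NAMES=orders,inventory \
        | oc create -f -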
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json
index 639ac2e11..e6f020400 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json
@@ -6,130 +6,166 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 (Ephemeral with https)"
},
"name": "datagrid65-https"
},
"labels": {
"template": "datagrid65-https",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -301,7 +337,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -386,9 +422,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -437,6 +478,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
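The datagrid65-https message now asks for the "datagrid-service-account" service account plus a secret carrying keystore.jks and jgroups.jceks. One possible way to produce them, sketched with placeholder passwords and aliases (the exact alias names the image expects are not specified in this diff):

    # Self-signed server keystore and a JCEKS keystore for JGroups encryption (illustrative values)
    keytool -genkeypair -alias jdg-server -keyalg RSA -keystore keystore.jks \
        -storepass password -keypass password -dname "CN=datagrid-app"
    keytool -genseckey -alias jgroups -keyalg AES -keysize 128 -storetype JCEKS \
        -keystore jgroups.jceks -storepass password -keypass password
    # HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET default to the same name, so one secret can hold both files
    oc create secret generic datagrid-app-secret --from-file=keystore.jks --from-file=jgroups.jceks
    oc create serviceaccount datagrid-service-account
    oc secrets link datagrid-service-account datagrid-app-secret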
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json
index 22ca3f0a0..ff57a7936 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and MySQL applications with persistent storage.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + MySQL (Persistent with https)"
},
"name": "datagrid65-mysql-persistent"
},
"labels": {
"template": "datagrid65-mysql-persistent",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using MySQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:/jboss/datasources/mysql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,117 +111,158 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -230,7 +286,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -254,7 +311,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +336,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Memcached service for clustered applications."
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -302,7 +361,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Hot Rod service for clustered applications."
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -397,7 +457,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -482,9 +542,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -585,6 +650,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -669,7 +742,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
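With the change above, the MySQL image reference resolves through ${MYSQL_IMAGE_STREAM_TAG} instead of mysql:latest, so the database version can be pinned per deployment. A brief sketch, using 5.6 purely as an illustrative alternative to the 5.7 default:

    # Override the MySQL tag and the persistent volume size at processing time
    oc process datagrid65-mysql-persistent \
        -p MYSQL_IMAGE_STREAM_TAG=5.6 \
        -p VOLUME_CAPACITY=1Gi \
        | oc create -f -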
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json
index e1a585d24..44902de25 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and MySQL applications.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + MySQL (Ephemeral with https)"
},
"name": "datagrid65-mysql"
},
"labels": {
"template": "datagrid65-mysql",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using MySQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:/jboss/datasources/mysql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,111 +111,151 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -224,7 +279,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -248,7 +304,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +329,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Memcached service for clustered applications."
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -296,7 +354,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Hot Rod service for clustered applications."
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -391,7 +450,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -476,9 +535,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -579,6 +643,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -663,7 +735,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json
index 12720eb19..6b90e1370 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and PostgreSQL applications with persistent storage.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + PostgreSQL (Persistent with https)"
},
"name": "datagrid65-postgresql-persistent"
},
"labels": {
"template": "datagrid65-postgresql-persistent",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using PostgreSQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/postgresql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,102 +111,140 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -215,7 +268,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -239,7 +293,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -382,7 +437,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -467,9 +522,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -570,6 +630,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -654,7 +722,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
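The PostgreSQL variants get the same treatment via ${POSTGRESQL_IMAGE_STREAM_TAG}. A hedged sketch of checking the available tags before overriding the 9.5 default:

    # List the tags published for the postgresql image stream, then instantiate with an explicit one
    oc get imagestream postgresql -n openshift
    oc process datagrid65-postgresql-persistent \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5 \
        | oc create -f -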
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json
index da8015fb0..ae36376db 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json
@@ -6,82 +6,96 @@
"iconClass": "icon-jboss",
"description": "Application template for JDG 6.5 and PostgreSQL applications built using.",
"tags": "datagrid,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 + PostgreSQL (Ephemeral with https)"
},
"name": "datagrid65-postgresql"
},
"labels": {
"template": "datagrid65-postgresql",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new data grid service (using PostgreSQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "datagrid-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Username",
"description": "User name for JDG user.",
"name": "USERNAME",
"value": "",
"required": false
},
{
- "description": "Password for JDG user.",
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
"name": "PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "datagrid-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/postgresql",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -89,6 +103,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -96,96 +111,133 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "Infinispan Connectors",
"description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
"name": "INFINISPAN_CONNECTORS",
"value": "hotrod,memcached,rest",
"required": false
},
{
+ "displayName": "Cache Names",
"description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configurd for each entry.",
"name": "CACHE_NAMES",
"value": "",
"required": false
},
{
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
"description": "",
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "",
"required": false
},
{
+ "displayName": "Memcached Cache Name",
"description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
"name": "MEMCACHED_CACHE",
"value": "default",
"required": false
},
{
+ "displayName": "REST Security Domain",
"description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
"name": "REST_SECURITY_DOMAIN",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "datagrid-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
"required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -209,7 +261,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -233,7 +286,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -376,7 +430,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datagrid65-openshift:1.2"
+ "name": "jboss-datagrid65-openshift:1.4"
}
}
},
@@ -461,9 +515,14 @@
"protocol": "TCP"
},
{
- "name": "hotrod",
+ "name": "hotrod-internal",
"containerPort": 11222,
"protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
}
],
"env": [
@@ -564,6 +623,14 @@
"value": "${CACHE_NAMES}"
},
{
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
"name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
"value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
},
@@ -648,7 +715,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
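
The POSTGRESQL_IMAGE_STREAM_TAG parameter introduced above replaces the hard-coded postgresql:latest reference, so the database image can be pinned when the template is instantiated. A minimal sketch, assuming the edited template is saved locally as datagrid65-postgresql.json and the oc client is already logged in to the target project:

    # process the template with explicit values; parameters left unset keep their
    # declared defaults, and generated expressions (DB_USERNAME, DB_PASSWORD) are
    # filled in during processing
    $ oc process -f datagrid65-postgresql.json \
        -p APPLICATION_NAME=datagrid-app \
        -p POSTGRESQL_IMAGE_STREAM_TAG=9.5 \
      | oc create -f -
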
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json
index 7d64dac98..ea2f13742 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (no SSL)"
},
"name": "datavirt63-basic-s2i"
},
@@ -60,6 +61,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -133,6 +135,27 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -215,7 +238,22 @@
"uri": "${SOURCE_REPOSITORY_URL}",
"ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir": "${CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
},
"strategy": {
"type": "Source",
@@ -224,8 +262,26 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
- }
+ "name": "jboss-datavirt63-openshift:1.2"
+ },
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ]
}
},
"output": {
@@ -252,6 +308,15 @@
"imageChange": {}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
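
The images stanza added to the BuildConfig source copies the Data Grid client extensions out of the jboss-datagrid65-client-openshift image, so that image stream has to exist in ${IMAGE_STREAM_NAMESPACE} before a build can run. A hedged pre-flight sketch (the jboss-image-streams.json filename for the xPaaS stream definitions is an assumption here):

    # confirm the client image stream tag referenced by the new source image is present
    $ oc get imagestreamtag jboss-datagrid65-client-openshift:1.0 -n openshift
    # if it is missing, load the xPaaS image stream definitions and retry the build
    $ oc create -f jboss-image-streams.json -n openshift
    $ oc start-build datavirt-app
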
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json
index 1e7c03b99..22b579ecc 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -6,7 +6,8 @@
"iconClass": "icon-jboss",
"description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
"tags": "jdv,datavirt,jboss,xpaas",
- "version": "1.4.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (with SSL and Extensions)"
},
"name": "datavirt63-extensions-support-s2i"
},
@@ -102,6 +103,7 @@
},
{
"description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
"name": "SERVICE_ACCOUNT_NAME",
"value": "datavirt-service-account",
"required": true
@@ -238,6 +240,27 @@
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -446,6 +469,19 @@
{
"from": {
"kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ },
+ {
+ "from": {
+ "kind": "ImageStreamTag",
"name": "${APPLICATION_NAME}-ext:latest"
},
"paths": [
@@ -464,12 +500,24 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-datavirt63-openshift:1.0"
+ "name": "jboss-datavirt63-openshift:1.2"
},
"env": [
{
"name": "CUSTOM_INSTALL_DIRECTORIES",
"value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
]
}
@@ -507,6 +555,15 @@
}
},
{
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
"type": "ConfigChange"
}
]
@@ -713,7 +770,7 @@
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE",
- "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ "value": "${HTTPS_KEYSTORE}"
},
{
"name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json
new file mode 100644
index 000000000..9392c20a6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json
@@ -0,0 +1,940 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3 (with SSL)"
+ },
+ "name": "datavirt63-secure-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-secure-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "displayName": "Service Account Name",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of source directories containing VDBs for deployment",
+ "displayName": "VDB Deployment Directories",
+ "name": "VDB_DIRS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "displayName": "SSO Server URL",
+ "name": "SSO_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "SSO Server Service URL",
+ "name": "SSO_SERVICE_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "displayName": "SSO Realm",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "displayName": "SSO Username",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "displayName": "SSO User's Password",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Realm Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
+ "displayName": "SSO Realm Public Key",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type. true or false",
+ "displayName": "SSO Bearer Only",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "displayName": "SSO SAML Keystore Secret",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "displayName": "SSO SAML Keystore File",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "displayName": "SSO SAML Certificate Alias",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "displayName": "SSO SAML Keystore Password",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "displayName": "SSO Client Secret",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Enable CORS for SSO applications. true or false",
+ "name": "SSO_ENABLE_CORS",
+ "displayName": "SSO Enable CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "displayName": "SSO SAML Logout Page",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "displayName": "SSO Disable SSL Certificate Validation",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "displayName": "SSO Truststore File",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "displayName": "SSO Truststore Password",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "displayName": "SSO Truststore Secret",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of deployments that shoulds be exploded and enabled for SSO OpenIDConnect via auth-method",
+ "name": "SSO_OPENIDCONNECT_DEPLOYMENTS",
+ "displayName": "SSO OpenIDConnect Deployments",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Comma delimited list of deployments that shoulds be exploded and enabled for SSO SAML via auth-method",
+ "name": "SSO_SAML_DEPLOYMENTS",
+ "displayName": "SSO SAML Deployments",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/datagrid65",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.2"
+ },
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "VDB_DIRS",
+ "value": "${VDB_DIRS}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jboss-datagrid65-client-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_OPENIDCONNECT_DEPLOYMENTS",
+ "value": "${SSO_OPENIDCONNECT_DEPLOYMENTS}"
+ },
+ {
+ "name": "SSO_SAML_DEPLOYMENTS",
+ "value": "${SSO_SAML_DEPLOYMENTS}"
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
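
Since the new datavirt63-secure-s2i template only ships the objects above, instantiating it follows the usual register-then-create flow. A sketch, assuming the file is registered in the current project and using a made-up application domain for the custom route hostname:

    $ oc create -f datavirt63-secure-s2i.json
    $ oc new-app --template=datavirt63-secure-s2i \
        -p APPLICATION_NAME=datavirt-app \
        -p CONFIGURATION_NAME=datavirt-app-config \
        -p HTTPS_SECRET=datavirt-app-secret \
        -p HOSTNAME_JDBC=jdbc-datavirt-app.apps.example.com

Both the https and JDBC routes use passthrough TLS termination, so the certificate held in ${HTTPS_SECRET} must be valid for the external hostnames; the router never terminates those connections itself.
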
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json
index 754a3b4c0..1989036fa 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server + A-MQ (with https)"
},
"name": "decisionserver62-amq-s2i"
},
@@ -14,20 +15,24 @@
"template": "decisionserver62-amq-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,102 +40,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -138,6 +160,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,18 +168,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +198,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
@@ -198,7 +226,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -222,7 +251,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -564,7 +594,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
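
The AMQ_MESH_DISCOVERY_TYPE description above spells out the extra permission needed for 'kube' discovery, and the new template message asks for a service account plus keystore secret. A sketch of that setup (the myproject namespace is a placeholder):

    $ oc create serviceaccount decisionserver-service-account
    $ oc create secret generic decisionserver-app-secret --from-file=keystore.jks
    $ oc secrets link decisionserver-service-account decisionserver-app-secret
    # only needed when AMQ_MESH_DISCOVERY_TYPE=kube, as noted in the parameter description
    $ oc policy add-role-to-user view system:serviceaccount:myproject:default
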
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json
index 8be4ac90b..25b2c162c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server (no https)"
},
"name": "decisionserver62-basic-s2i"
},
@@ -14,20 +15,24 @@
"template": "decisionserver62-basic-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,48 +40,56 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -84,6 +97,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -91,6 +105,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -98,6 +113,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json
index bf9047599..85605d642 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json
@@ -5,8 +5,9 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.2 decision server HTTPS applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.3.3",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server (with https)"
},
"name": "decisionserver62-https-s2i"
},
@@ -14,32 +15,38 @@
"template": "decisionserver62-https-s2i",
"xpaas": "1.3.3"
},
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,78 +54,91 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -126,6 +146,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +154,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +162,7 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json
index 51e667e02..ecea54d94 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server + A-MQ (with https)"
},
"name": "decisionserver63-amq-s2i"
},
"labels": {
"template": "decisionserver63-amq-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,108 +40,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -144,6 +167,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -151,18 +175,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,10 +205,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -204,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -228,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -334,13 +378,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
@@ -574,7 +626,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json
index c5f0d006a..d655dbe94 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server (no https)"
},
"name": "decisionserver63-basic-s2i"
},
"labels": {
"template": "decisionserver63-basic-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,48 +40,56 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -84,6 +97,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -91,6 +105,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -98,10 +113,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -184,13 +213,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json
index 3db0e4c84..78e79c0cf 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BRMS 6.3 decision server HTTPS applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "decisionserver,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server (with https)"
},
"name": "decisionserver63-https-s2i"
},
"labels": {
"template": "decisionserver63-https-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,78 +54,91 @@
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "decisionserver/hellorules",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "decisionserver-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -126,6 +146,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +154,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -140,10 +162,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -273,13 +309,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-decisionserver63-openshift:1.3"
+ "name": "jboss-decisionserver63-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-amq-s2i.json
new file mode 100644
index 000000000..c688a2a67
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-amq-s2i.json
@@ -0,0 +1,748 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server + A-MQ (with https)"
+ },
+ "name": "decisionserver64-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-amq-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS/A-MQ application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-basic-s2i.json
new file mode 100644
index 000000000..778c51844
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-basic-s2i.json
@@ -0,0 +1,376 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server (no https)"
+ },
+ "name": "decisionserver64-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-https-s2i.json
new file mode 100644
index 000000000..e6c6961c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver64-https-s2i.json
@@ -0,0 +1,517 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.4 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server (with https)"
+ },
+ "name": "decisionserver64-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver64-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BRMS application with SSL support has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. Please be sure to create the \"decisionserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json
index 72dbb4302..912838175 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -5,131 +5,153 @@
"annotations": {
"description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + A-MQ (Persistent with https)"
},
"name": "eap64-amq-persistent-s2i"
},
"labels": {
"template": "eap64-amq-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -137,6 +159,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,18 +167,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +189,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,36 +197,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -207,10 +240,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -234,7 +281,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -258,7 +306,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,11 +409,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -641,7 +700,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
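
The bulk of this hunk attaches a human-readable `displayName` to each parameter, which is what the web console uses to label the fields on the template instantiation form. A small sketch of how to audit that (file name is illustrative):

```python
# Sketch: report template parameters that still have no "displayName", the
# field this diff is adding alongside each "description".
import json

with open("eap64-amq-persistent-s2i.json") as f:
    template = json.load(f)

for param in template.get("parameters", []):
    if "displayName" not in param:
        print("%s: %s" % (param["name"], param.get("description", "")))
# For this file only ARTIFACT_DIR is expected to be reported, since it is the
# one new parameter added above without a displayName.
```
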
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json
index 9dd847451..dd4c7a27b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json
@@ -5,119 +5,139 @@
"annotations": {
"description": "Application template for EAP 6 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + A-MQ (Ephemeral with https)"
},
"name": "eap64-amq-s2i"
},
"labels": {
"template": "eap64-amq-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +145,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,18 +153,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -151,6 +175,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -158,36 +183,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,10 +226,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -222,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -246,7 +292,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -348,11 +395,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -626,7 +683,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
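
The new `service.alpha.openshift.io/dependencies` annotation on the HTTP and HTTPS services is a JSON array serialized into a string, so a consumer has to parse it separately after loading the template itself. A hedged sketch (path is illustrative):

```python
# Sketch: read the service dependency annotations added in this template. The
# annotation value is itself JSON encoded as a string, hence the second parse.
import json

with open("eap64-amq-s2i.json") as f:
    template = json.load(f)

DEP_KEY = "service.alpha.openshift.io/dependencies"
for obj in template.get("objects", []):
    if obj.get("kind") != "Service":
        continue
    annotations = obj.get("metadata", {}).get("annotations", {})
    if DEP_KEY in annotations:
        deps = json.loads(annotations[DEP_KEY])  # e.g. [{"name": "${APPLICATION_NAME}-amq-tcp", "kind": "Service"}]
        print(obj["metadata"]["name"], "->", ", ".join(d["name"] for d in deps))
```
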
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json
index 7b1800b7b..e13b3851b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json
@@ -6,58 +6,68 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (no https)"
},
"name": "eap64-basic-s2i"
},
"labels": {
"template": "eap64-basic-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application has been created in your project.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -65,6 +75,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -72,6 +83,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -79,12 +91,14 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -92,10 +106,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -174,11 +202,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
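
Parameters such as GITHUB_WEBHOOK_SECRET and JGROUPS_CLUSTER_PASSWORD carry a `from` pattern like `[a-zA-Z0-9]{8}`; when no value is supplied at processing time, a value is generated from that expression (the accompanying `"generate": "expression"` field sits in context lines elided by the hunk boundaries above). The sketch below only approximates that behaviour for the simple character-class-plus-length patterns used in these templates; it is an illustration, not the OpenShift generator.

```python
# Sketch: approximate value generation for parameters with a "from" pattern
# such as "[a-zA-Z0-9]{8}" or "user[a-zA-Z0-9]{3}". Only this narrow pattern
# form is handled.
import random
import re
import string

def generate_from_pattern(pattern):
    match = re.match(r"^(.*?)\[a-zA-Z0-9\]\{(\d+)\}$", pattern)
    if not match:
        raise ValueError("unsupported pattern: %s" % pattern)
    prefix, length = match.group(1), int(match.group(2))
    alphabet = string.ascii_letters + string.digits
    return prefix + "".join(random.choice(alphabet) for _ in range(length))

print(generate_from_pattern("[a-zA-Z0-9]{8}"))      # e.g. "x3Fh9QzL"
print(generate_from_pattern("user[a-zA-Z0-9]{3}"))  # e.g. "user7Kp"
```
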
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json
index 31716d84c..0da32eb40 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json
@@ -6,100 +6,117 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (with https)"
},
"name": "eap64-https-s2i"
},
"labels": {
"template": "eap64-https-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "6.4.x",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,10 +183,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -287,11 +326,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
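
As the template `message` notes, the https variants expect the keystore secret and the `eap-service-account` service account to exist before the template is processed. One way to drive that end to end, sketched with illustrative names, passwords, and file paths (the keystore files must already exist locally; this is not part of the change itself):

```python
# Sketch: create the objects an https-enabled EAP template expects, then
# process and create the template with oc.
import subprocess

def oc(*args):
    subprocess.run(["oc"] + list(args), check=True)

oc("create", "serviceaccount", "eap-service-account")
oc("create", "secret", "generic", "eap-app-secret",
   "--from-file=keystore.jks", "--from-file=jgroups.jceks")

processed = subprocess.run(
    ["oc", "process", "-f", "eap64-https-s2i.json",
     "-p", "APPLICATION_NAME=eap-app",
     "-p", "HTTPS_NAME=jboss",
     "-p", "HTTPS_PASSWORD=changeit"],
    check=True, stdout=subprocess.PIPE).stdout

subprocess.run(["oc", "create", "-f", "-"], input=processed, check=True)
```
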
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
index 212431056..77b75466d 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -5,149 +5,175 @@
"annotations": {
"description": "Application template for EAP 6 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MongoDB (Persistent with https)"
},
"name": "eap64-mongodb-persistent-s2i"
},
"labels": {
"template": "eap64-mongodb-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -176,6 +205,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -183,6 +213,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -190,36 +221,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -227,10 +264,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -254,7 +312,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +337,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -380,11 +440,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -674,7 +744,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
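
With the new MONGODB_IMAGE_STREAM_TAG parameter (default `3.2`), the database trigger no longer floats on `mongodb:latest`; picking up a newer MongoDB becomes an explicit parameter change rather than an implicit image stream update. A hedged sketch for auditing templates that still reference a `:latest` ImageStreamTag (directory path is illustrative):

```python
# Sketch: walk template files and flag ImageStreamTag references that still end
# in ":latest", the floating tag this change replaces with a pinned parameter.
import glob
import json

def find_latest_tags(node, hits):
    if isinstance(node, dict):
        if node.get("kind") == "ImageStreamTag" and node.get("name", "").endswith(":latest"):
            hits.append(node["name"])
        for value in node.values():
            find_latest_tags(value, hits)
    elif isinstance(node, list):
        for item in node:
            find_latest_tags(item, hits)

for path in glob.glob("roles/openshift_examples/files/examples/v3.6/xpaas-templates/*.json"):
    with open(path) as f:
        hits = []
        find_latest_tags(json.load(f), hits)
    if hits:
        print(path, hits)
```
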
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json
index 13fbbdd93..2785782d4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json
@@ -5,143 +5,168 @@
"annotations": {
"description": "Application template for EAP 6 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MongoDB (Ephemeral with https)"
},
"name": "eap64-mongodb-s2i"
},
"labels": {
"template": "eap64-mongodb-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +198,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,6 +206,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -184,36 +214,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -221,10 +257,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -248,7 +305,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +330,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -374,11 +433,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -668,7 +737,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
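
These template updates all follow the same xpaas 1.3.2 to 1.4.0 pattern, so a quick way to review a bump like this is to diff the parameter lists of the old and new copies rather than the raw JSON. A sketch, assuming the two file paths are passed on the command line:

```python
# Sketch: compare the parameter lists of two template revisions (for example
# the v1.6 and v3.6 copies of eap64-mongodb-s2i.json) and report what the
# newer one adds.
import json
import sys

def params(path):
    with open(path) as f:
        return {p["name"]: p for p in json.load(f).get("parameters", [])}

old, new = params(sys.argv[1]), params(sys.argv[2])
for name in sorted(set(new) - set(old)):
    print("added parameter:", name)
for name in sorted(set(new) & set(old)):
    if "displayName" in new[name] and "displayName" not in old[name]:
        print("displayName added:", name)
```
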
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json
index 69fdec206..cca0f9c2b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -5,159 +5,187 @@
"annotations": {
"description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MySQL (Persistent with https)"
},
"name": "eap64-mysql-persistent-s2i"
},
"labels": {
"template": "eap64-mysql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +193,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -172,6 +201,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -179,6 +209,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -186,6 +217,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -193,36 +225,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -230,10 +268,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -257,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -281,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -383,11 +444,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -681,7 +752,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json
index 2bd3c249f..5766506fd 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json
@@ -5,153 +5,180 @@
"annotations": {
"description": "Application template for EAP 6 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + MySQL (Ephemeral with https)"
},
"name": "eap64-mysql-s2i"
},
"labels": {
"template": "eap64-mysql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +186,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -166,6 +194,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,6 +202,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -180,6 +210,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -187,36 +218,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -377,11 +437,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -675,7 +745,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 31f245950..01891774d 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -5,144 +5,169 @@
"annotations": {
"description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + PostgreSQL (Persistent with https)"
},
"name": "eap64-postgresql-persistent-s2i"
},
"labels": {
"template": "eap64-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -150,6 +175,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -157,6 +183,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +191,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +199,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -178,36 +207,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -215,10 +250,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -242,7 +298,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -266,7 +323,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -368,11 +426,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -666,7 +734,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json
index eac964697..e00f2b0e3 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json
@@ -5,138 +5,162 @@
"annotations": {
"description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + PostgreSQL (Ephemeral with https)"
},
"name": "eap64-postgresql-s2i"
},
"labels": {
"template": "eap64-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,6 +168,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -151,6 +176,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,6 +184,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +192,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -172,36 +200,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -362,11 +419,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
}
}
},
@@ -660,7 +727,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json
index 09023be71..ec0739d04 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json
@@ -3,103 +3,120 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
+ "iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
- "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 + Single Sign-On (with https)"
},
"name": "eap64-sso-s2i"
},
"labels": {
"template": "eap64-sso-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 6 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTP",
"value": "",
"required": true
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": true
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.x-ose",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,84 +183,98 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
},
{
+ "displayName": "URL for SSO",
"description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
"name": "SSO_URL",
"value": "",
"required": true
},
{
- "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
"name": "SSO_SERVICE_URL",
"value": "https://secure-sso:8443/auth",
"required": false
},
{
+ "displayName": "SSO Realm",
"description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": true
},
{
+ "displayName": "SSO Username",
"description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
"name": "SSO_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Password",
"description": "The password for the SSO service user.",
"name": "SSO_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Public Key",
"description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
"name": "SSO_PUBLIC_KEY",
"value": "",
"required": false
},
{
+ "displayName": "SSO Bearer Only?",
"description": "SSO Client Access Type",
"name": "SSO_BEARER_ONLY",
"value": "",
"required": false
},
{
+ "displayName": "Artifact Directories",
"description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
"name": "ARTIFACT_DIR",
"value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Secret",
"description": "The name of the secret containing the keystore file",
"name": "SSO_SAML_KEYSTORE_SECRET",
"value": "eap-app-secret",
"required": false
},
{
+ "displayName": "SSO SAML Keystore",
"description": "The name of the keystore file within the secret",
"name": "SSO_SAML_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "SSO SAML Certificate Name",
"description": "The name associated with the server certificate",
"name": "SSO_SAML_CERTIFICATE_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Password",
"description": "The password for the keystore and certificate",
"name": "SSO_SAML_KEYSTORE_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "SSO Client Secret",
"description": "The SSO Client Secret for Confidential Access",
"name": "SSO_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -243,42 +282,55 @@
"required": true
},
{
+ "displayName": "Enable CORS for SSO?",
"description": "Enable CORS for SSO applications",
"name": "SSO_ENABLE_CORS",
"value": "false",
"required": false
},
{
+ "displayName": "SSO SAML Logout Page",
"description": "SSO logout page for SAML applications",
"name": "SSO_SAML_LOGOUT_PAGE",
"value": "/",
"required": false
},
{
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
"description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
"name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
"value": "true",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "eap-app-secret",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
}
- ],
+ ],
"objects": [
{
"kind": "Service",
@@ -406,7 +458,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap64-openshift:1.4"
+ "name": "jboss-eap64-openshift:1.5"
},
"env": [
{
@@ -416,6 +468,10 @@
{
"name": "MAVEN_ARGS_APPEND",
"value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
}
]
}
@@ -615,7 +671,7 @@
"name": "HORNETQ_TOPICS",
"value": "${HORNETQ_TOPICS}"
},
- {
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-third-party-db-s2i.json
new file mode 100644
index 000000000..e8f6d6585
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-third-party-db-s2i.json
@@ -0,0 +1,646 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4 (with https, supporting third-party DB drivers)"
+ },
+ "name": "eap64-third-party-db-s2i"
+ },
+ "labels": {
+ "template": "eap64-third-party-db-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 6 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets:\"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
+ "name": "CONFIGURATION_NAME",
+ "value": "eap-app-config",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within HornetQ subsystem.",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within HornetQ subsystem.",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Cluster Admin Password",
+ "description": "Admin password for HornetQ cluster.",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/eap-environment",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
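A template such as the one above is normally imported into the cluster (the openshift_examples role loads these files into the openshift namespace) and then instantiated with the oc client. A minimal sketch, assuming the rendered JSON is saved locally as <template-file>.json (placeholder name, since the actual filename is in the diff header above this hunk) and the secrets referenced by the parameter defaults already exist in the project:

    # Render the template with an example parameter override and create the objects.
    oc process -f <template-file>.json \
        -p APPLICATION_NAME=eap-app \
        | oc create -f -

Parameters carrying a "generate": "expression" rule, such as GENERIC_WEBHOOK_SECRET and JGROUPS_CLUSTER_PASSWORD above, are filled in automatically when no value is passed.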
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json
index 3ca9e9fab..3f0eba6e3 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -3,121 +3,155 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 7 A-MQ applications built using S2I.",
+ "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + A-MQ (Persistent with https)"
},
- "name": "eap70-amq-s2i"
+ "name": "eap70-amq-persistent-s2i"
},
"labels": {
- "template": "eap70-amq-s2i",
- "xpaas": "1.3.2"
+ "template": "eap70-amq-persistent-s2i",
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": true
+ "required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +159,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,18 +167,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -151,6 +189,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -158,36 +197,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,10 +240,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -222,7 +281,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -246,7 +306,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -348,11 +409,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -613,7 +684,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -626,7 +700,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
@@ -704,6 +778,12 @@
"protocol": "TCP"
}
],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
"env": [
{
"name": "AMQ_USER",
@@ -730,6 +810,10 @@
"value": "${MQ_SERIALIZABLE_PACKAGES}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "${AMQ_MESH_DISCOVERY_TYPE}"
},
@@ -751,10 +835,38 @@
}
]
}
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
]
}
}
}
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
}
]
}
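The persistent variant above mounts a PersistentVolumeClaim, sized by VOLUME_CAPACITY, at /opt/amq/data/kahadb and switches the A-MQ deployment to a Rolling strategy with maxSurge 0. A minimal usage sketch, assuming the template has been imported into the openshift namespace under its default name and treating the capacity value as an example override of the 512Mi default:

    oc new-app --template=eap70-amq-persistent-s2i \
        -p APPLICATION_NAME=eap-app \
        -p VOLUME_CAPACITY=1Gi
    # The claim follows the ${APPLICATION_NAME}-amq-claim pattern defined above
    # and should bind to an available persistent volume:
    oc get pvc eap-app-amq-claim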
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json
index f08cdf2f9..f2d65f353 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json
@@ -3,133 +3,141 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
+ "description": "Application template for EAP 7 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + A-MQ (with https)"
},
- "name": "eap70-amq-persistent-s2i"
+ "name": "eap70-amq-s2i"
},
"labels": {
- "template": "eap70-amq-persistent-s2i",
- "xpaas": "1.3.2"
+ "template": "eap70-amq-s2i",
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "helloworld-mdb",
"required": false
},
{
- "description": "Size of persistent storage for database volume.",
- "name": "VOLUME_CAPACITY",
- "value": "512Mi",
- "required": true
- },
- {
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
"value": "java:/ConnectionFactory",
"required": false
},
{
- "description": "Split the data directory for each node in a mesh.",
- "name": "AMQ_SPLIT",
- "value": "false",
- "required": false
- },
- {
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "HELLOWORLDMDBQueue",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "HELLOWORLDMDBTopic",
"required": false
},
{
+ "displayName": "A-MQ Serializable Packages",
"description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
"name": "MQ_SERIALIZABLE_PACKAGES",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -137,6 +145,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,18 +153,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +175,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,36 +183,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -207,10 +226,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -234,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTP port."
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -258,7 +292,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's HTTPS port."
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,11 +395,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -625,10 +670,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling",
- "rollingParams": {
- "maxSurge": 0
- }
+ "type": "Recreate"
},
"triggers": [
{
@@ -641,7 +683,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
@@ -719,12 +761,6 @@
"protocol": "TCP"
}
],
- "volumeMounts": [
- {
- "mountPath": "/opt/amq/data/kahadb",
- "name": "${APPLICATION_NAME}-amq-pvol"
- }
- ],
"env": [
{
"name": "AMQ_USER",
@@ -751,10 +787,6 @@
"value": "${MQ_SERIALIZABLE_PACKAGES}"
},
{
- "name": "AMQ_SPLIT",
- "value": "${AMQ_SPLIT}"
- },
- {
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "${AMQ_MESH_DISCOVERY_TYPE}"
},
@@ -776,38 +808,10 @@
}
]
}
- ],
- "volumes": [
- {
- "name": "${APPLICATION_NAME}-amq-pvol",
- "persistentVolumeClaim": {
- "claimName": "${APPLICATION_NAME}-amq-claim"
- }
- }
]
}
}
}
- },
- {
- "apiVersion": "v1",
- "kind": "PersistentVolumeClaim",
- "metadata": {
- "name": "${APPLICATION_NAME}-amq-claim",
- "labels": {
- "application": "${APPLICATION_NAME}"
- }
- },
- "spec": {
- "accessModes": [
- "ReadWriteOnce"
- ],
- "resources": {
- "requests": {
- "storage": "${VOLUME_CAPACITY}"
- }
- }
- }
}
]
}
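As the message text above notes, both A-MQ templates expect the ${SERVICE_ACCOUNT_NAME} service account and the HTTPS/JGroups keystore secrets to exist before deployment. A minimal pre-setup sketch using the parameter defaults (eap7-service-account, eap7-app-secret, keystore.jks, jgroups.jceks), assuming the two keystore files are present in the current directory and with <project> as a placeholder for the target namespace:

    oc create serviceaccount eap7-service-account
    oc create secret generic eap7-app-secret \
        --from-file=keystore.jks --from-file=jgroups.jceks
    # Needed when AMQ_MESH_DISCOVERY_TYPE=kube, per the parameter description above,
    # adjusted here to the configured service account:
    oc policy add-role-to-user view system:serviceaccount:<project>:eap7-service-account

Because HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET both default to eap7-app-secret, a single secret carrying both keystore files satisfies the defaults; separate secrets work just as well if the parameters are overridden.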
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json
index 83b4d5b24..c33e3f7cb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json
@@ -6,58 +6,68 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (no https)"
},
"name": "eap70-basic-s2i"
},
"labels": {
"template": "eap70-basic-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application has been created in your project.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.0.GA",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -65,6 +75,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -72,6 +83,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -79,12 +91,14 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -92,10 +106,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -174,11 +202,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
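The builds for these templates now accept MAVEN_MIRROR_URL and ARTIFACT_DIR as S2I build environment variables. A minimal sketch of pointing a build at an internal mirror, assuming the template is registered as eap70-basic-s2i and with nexus.example.com standing in for a real repository manager:

    oc new-app --template=eap70-basic-s2i \
        -p APPLICATION_NAME=eap-app \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/

Leaving MAVEN_MIRROR_URL empty keeps the default behaviour of resolving artifacts from the public repositories; ARTIFACT_DIR can likewise stay empty so that everything under /target is copied, as described above.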
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json
index 1292442a4..7542d31c8 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json
@@ -6,100 +6,117 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.2"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (with https)"
},
"name": "eap70-https-s2i"
},
"labels": {
"template": "eap70-https-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.0.GA",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "kitchensink",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,10 +183,24 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -287,11 +326,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
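The https variants require the keystore secret referenced by HTTPS_SECRET. For a throwaway test setup, a self-signed keystore can be generated and wrapped in the expected secret; a sketch only, with the alias, password and DN being arbitrary example values:

    keytool -genkeypair -alias eap-https -keyalg RSA \
        -dname "CN=eap-app.example.com" \
        -keystore keystore.jks -storepass mykeystorepass
    oc create secret generic eap7-app-secret --from-file=keystore.jks

The matching parameters would then be HTTPS_NAME=eap-https, HTTPS_PASSWORD=mykeystorepass and HTTPS_KEYSTORE=keystore.jks (the default). Production deployments should use a properly signed certificate instead.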
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
index 99db77d58..8a7da66c1 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -5,149 +5,175 @@
"annotations": {
"description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MongoDB (Persistent with https)"
},
"name": "eap70-mongodb-persistent-s2i"
},
"labels": {
"template": "eap70-mongodb-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -176,6 +205,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -183,6 +213,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -190,36 +221,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -227,10 +264,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -254,7 +312,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -278,7 +337,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -380,11 +440,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -685,7 +755,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
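The MongoDB templates now pin the database image through MONGODB_IMAGE_STREAM_TAG (default 3.2) instead of tracking mongodb:latest. A minimal sketch of rendering the persistent variant directly from this file, with the parameter values being examples only:

    oc process -f eap70-mongodb-persistent-s2i.json \
        -p APPLICATION_NAME=eap-app \
        -p MONGODB_IMAGE_STREAM_TAG=3.2 \
        -p VOLUME_CAPACITY=1Gi \
        | oc create -f -

Pinning the tag means the deployment tracks a specific MongoDB minor version rather than whatever the latest tag in the openshift namespace happens to point to.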
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json
index c8150c231..ae52a3deb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json
@@ -5,143 +5,168 @@
"annotations": {
"description": "Application template for EAP 7 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MongoDB (Ephemeral with https)"
},
"name": "eap70-mongodb-s2i"
},
"labels": {
"template": "eap70-mongodb-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -170,6 +198,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -177,6 +206,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -184,36 +214,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -221,10 +257,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -248,7 +305,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +330,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -374,11 +433,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -679,7 +748,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
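
The new "service.alpha.openshift.io/dependencies" annotation added to the http and https services above carries a JSON-encoded list as its string value, which the OpenShift web console can use to group the EAP service with its database service. A minimal illustrative sketch of the processed Service metadata, assuming the template's default APPLICATION_NAME of "eap-app":

    # Illustrative sketch only, using the template defaults; the annotation
    # value remains a JSON string embedded in the rendered resource.
    metadata:
      labels:
        application: eap-app
      annotations:
        description: The web server's http port.
        service.alpha.openshift.io/dependencies: '[{"name": "eap-app-mongodb", "kind": "Service"}]'
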
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json
index f8e5c2b04..a0a3d7717 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -5,159 +5,187 @@
"annotations": {
"description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MySQL (Persistent with https)"
},
"name": "eap70-mysql-persistent-s2i"
},
"labels": {
"template": "eap70-mysql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +193,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -172,6 +201,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -179,6 +209,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -186,6 +217,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -193,36 +225,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -230,10 +268,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -257,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -281,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -383,11 +444,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -696,7 +767,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
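
The database deployment trigger in these templates now references a pinned image stream tag instead of "latest", controlled by a new required parameter (MYSQL_IMAGE_STREAM_TAG here, defaulting to "5.7"); the MongoDB and PostgreSQL variants follow the same pattern. A minimal sketch of the rendered reference, assuming the template defaults:

    # Illustrative rendering with IMAGE_STREAM_NAMESPACE=openshift and
    # MYSQL_IMAGE_STREAM_TAG=5.7; override the parameter at processing time
    # to track a different MySQL major.minor version.
    from:
      kind: ImageStreamTag
      namespace: openshift
      name: mysql:5.7
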
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json
index 1edeb62e7..8255ade5d 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json
@@ -5,153 +5,180 @@
"annotations": {
"description": "Application template for EAP 7 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + MySQL (Ephemeral with https)"
},
"name": "eap70-mysql-s2i"
},
"labels": {
"template": "eap70-mysql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +186,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -166,6 +194,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,6 +202,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -180,6 +210,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -187,36 +218,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -377,11 +437,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -690,7 +760,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
index d11df06ee..436c541d8 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -5,144 +5,169 @@
"annotations": {
"description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + PostgreSQL (Persistent with https)"
},
"name": "eap70-postgresql-persistent-s2i"
},
"labels": {
"template": "eap70-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -150,6 +175,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -157,6 +183,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -164,6 +191,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -171,6 +199,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -178,36 +207,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -215,10 +250,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -242,7 +298,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -266,7 +323,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -368,11 +426,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -681,7 +749,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json
index 6b7f6d707..a2a37a886 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json
@@ -5,138 +5,162 @@
"annotations": {
"description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + PostgreSQL (Ephemeral with https)"
},
"name": "eap70-postgresql-s2i"
},
"labels": {
"template": "eap70-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/TodoListDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "MQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
- "required": false
+ "required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ cluster password",
"description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -144,6 +168,7 @@
"required": true
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -151,6 +176,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,6 +184,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -165,6 +192,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -172,36 +200,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -362,11 +419,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
}
}
},
@@ -675,7 +742,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
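
Each S2I build strategy above now wires the new MAVEN_MIRROR_URL and ARTIFACT_DIR parameters through as build environment variables. A hedged sketch of the processed build strategy, where the mirror URL is a purely hypothetical example value:

    # Sketch only; the mirror URL is hypothetical, and an empty ARTIFACT_DIR
    # keeps the default behaviour of copying every archive under /target.
    strategy:
      type: Source
      sourceStrategy:
        env:
        - name: MAVEN_MIRROR_URL
          value: http://nexus.example.svc:8081/repository/maven-public/
        - name: ARTIFACT_DIR
          value: ""
        forcePull: true
        from:
          kind: ImageStreamTag
          namespace: openshift
          name: jboss-eap70-openshift:1.5
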
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json
index 811602220..08a844cd9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json
@@ -3,103 +3,120 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
+ "iconClass": "icon-jboss",
"description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
- "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
- "version": "1.3.2"
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 + Single Sign-On (with https)"
},
"name": "eap70-sso-s2i"
},
"labels": {
"template": "eap70-sso-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new EAP 7 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "eap-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTP",
"value": "",
"required": true
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": true
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "7.0.x-ose",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "eap7-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "eap7-app-secret",
"required": true
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -107,6 +124,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -114,6 +132,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +140,42 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,84 +183,98 @@
"required": true
},
{
+ "displayName": "Deploy Exploded Archives",
"description": "Controls whether exploded deployment content should be automatically deployed",
"name": "AUTO_DEPLOY_EXPLODED",
"value": "false",
"required": false
},
{
+ "displayName": "URL for SSO",
"description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
"name": "SSO_URL",
"value": "",
"required": true
},
{
- "description": "The URL for the interal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
"name": "SSO_SERVICE_URL",
"value": "https://secure-sso:8443/auth",
"required": false
},
{
+ "displayName": "SSO Realm",
"description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": true
},
{
+ "displayName": "SSO Username",
"description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
"name": "SSO_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Password",
"description": "The password for the SSO service user.",
"name": "SSO_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Public Key",
"description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability",
"name": "SSO_PUBLIC_KEY",
"value": "",
"required": false
},
{
+ "displayName": "SSO Bearer Only?",
"description": "SSO Client Access Type",
"name": "SSO_BEARER_ONLY",
"value": "",
"required": false
},
{
+ "displayName": "Artifact Directories",
"description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
"name": "ARTIFACT_DIR",
"value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Secret",
"description": "The name of the secret containing the keystore file",
"name": "SSO_SAML_KEYSTORE_SECRET",
"value": "eap7-app-secret",
"required": false
},
{
+ "displayName": "SSO SAML Keystore",
"description": "The name of the keystore file within the secret",
"name": "SSO_SAML_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "SSO SAML Certificate Name",
"description": "The name associated with the server certificate",
"name": "SSO_SAML_CERTIFICATE_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "SSO SAML Keystore Password",
"description": "The password for the keystore and certificate",
"name": "SSO_SAML_KEYSTORE_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "SSO Client Secret",
"description": "The SSO Client Secret for Confidential Access",
"name": "SSO_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -243,42 +282,55 @@
"required": true
},
{
+ "displayName": "Enable CORS for SSO?",
"description": "Enable CORS for SSO applications",
"name": "SSO_ENABLE_CORS",
"value": "false",
"required": false
},
{
+ "displayName": "SSO SAML Logout Page",
"description": "SSO logout page for SAML applications",
"name": "SSO_SAML_LOGOUT_PAGE",
"value": "/",
"required": false
},
{
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
"description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
"name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
"value": "true",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "eap7-app-secret",
"required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
}
- ],
+ ],
"objects": [
{
"kind": "Service",
@@ -406,7 +458,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.4"
+ "name": "jboss-eap70-openshift:1.5"
},
"env": [
{
@@ -416,6 +468,10 @@
{
"name": "MAVEN_ARGS_APPEND",
"value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
}
]
}
@@ -593,7 +649,7 @@
{
"name": "HOSTNAME_HTTPS",
"value": "${HOSTNAME_HTTPS}"
- },
+ },
{
"name": "HTTPS_KEYSTORE_DIR",
"value": "/etc/eap-secret-volume"
@@ -626,7 +682,7 @@
"name": "HORNETQ_TOPICS",
"value": "${HORNETQ_TOPICS}"
},
- {
+ {
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "${JGROUPS_ENCRYPT_SECRET}"
},
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-third-party-db-s2i.json
new file mode 100644
index 000000000..9e854d7ab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-third-party-db-s2i.json
@@ -0,0 +1,657 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0 (with https, supporting third-party DB drivers)"
+ },
+ "name": "eap70-third-party-db-s2i"
+ },
+ "labels": {
+ "template": "eap70-third-party-db-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets:\"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
+ "name": "CONFIGURATION_NAME",
+ "value": "eap-app-config",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within Messaging subsystem.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within Messaging subsystem.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Messaging Cluster Admin Password",
+ "description": "Admin password for Messaging cluster.",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/eap-environment",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
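The message in this new template asks for a service account and two secrets to exist before deployment. Under the template's default parameter values, creating those prerequisites might look roughly like the following; the file names passed to --from-file are illustrative only:

    # Illustrative prerequisite objects matching the template defaults above.
    oc create serviceaccount eap-service-account
    oc create secret generic eap-app-config --from-file=datasource.properties
    oc create secret generic eap-app-secret \
        --from-file=keystore.jks --from-file=jgroups.jceks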
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 413a6de87..4e42e0eca 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -6,46 +6,54 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat7,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 (no https)"
},
"name": "jws30-tomcat7-basic-s2i"
},
"labels": {
"template": "jws30-tomcat7-basic-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -53,6 +61,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,6 +69,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -67,6 +77,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -74,10 +85,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -156,11 +181,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -248,7 +283,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json
index 610ea9441..f5fc2e581 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -6,76 +6,89 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat7,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 (with https)"
},
"name": "jws30-tomcat7-https-s2i"
},
"labels": {
"template": "jws30-tomcat7-https-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -83,6 +96,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -233,11 +263,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -326,7 +366,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 6ef9d6e4c..2a73a182c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -5,125 +5,147 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MongoDB (Persistent with https)"
},
"name": "jws30-tomcat7-mongodb-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-mongodb-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -131,6 +153,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -138,6 +161,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,6 +169,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -152,6 +177,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +185,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -166,6 +193,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -173,10 +201,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -200,7 +249,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -224,7 +274,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -326,11 +377,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -419,7 +480,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -547,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
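With the MongoDB deployment now referencing mongodb:${MONGODB_IMAGE_STREAM_TAG} instead of mongodb:latest, the database image version is controlled by a template parameter (default 3.2). A hypothetical instantiation pinning that tag explicitly:

    oc process -f jws30-tomcat7-mongodb-persistent-s2i.json \
        -p APPLICATION_NAME=jws-app \
        -p MONGODB_IMAGE_STREAM_TAG=3.2 \
        | oc create -f -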
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index 9b48f8ae7..a71dfa634 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -5,119 +5,140 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications built using S2I.",
- "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MongoDB (Ephemeral with https)"
},
"name": "jws30-tomcat7-mongodb-s2i"
},
"labels": {
"template": "jws30-tomcat7-mongodb-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +146,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,6 +154,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -139,6 +162,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -146,6 +170,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -153,6 +178,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -160,6 +186,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -167,10 +194,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -194,7 +242,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -218,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -320,11 +370,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -413,7 +473,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -541,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index 30af703ce..9a05dcbd5 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -5,135 +5,159 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MySQL (Persistent with https)"
},
"name": "jws30-tomcat7-mysql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-mysql-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -141,6 +165,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +173,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -176,10 +205,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -203,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -227,7 +278,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -329,11 +381,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -422,7 +484,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -546,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index c2843af63..553a30a44 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -5,129 +5,152 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications built using S2I.",
- "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + MySQL (Ephemeral with https)"
},
"name": "jws30-tomcat7-mysql-s2i"
},
"labels": {
"template": "jws30-tomcat7-mysql-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -135,6 +158,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -142,6 +166,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,10 +198,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -197,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -221,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -323,11 +374,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -416,7 +477,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -540,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
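
Note: the MYSQL_IMAGE_STREAM_TAG, MAVEN_MIRROR_URL and ARTIFACT_DIR parameters introduced in the hunks above are supplied at template instantiation time. A minimal usage sketch, assuming the template JSON is available locally under its base name and using purely illustrative values for the mirror URL:

    # Instantiate the updated template, overriding the new parameters.
    # The file name matches this diff; the parameter values are examples only.
    oc process -f jws30-tomcat7-mysql-s2i.json \
        -p MYSQL_IMAGE_STREAM_TAG=5.7 \
        -p MAVEN_MIRROR_URL=http://nexus.example.com/repository/maven-public/ \
        | oc create -f -
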
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index b8372f374..a5c6c8a56 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -5,120 +5,141 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + PostgreSQL (Persistent with https)"
},
"name": "jws30-tomcat7-postgresql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat7-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -126,6 +147,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +155,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +163,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -147,6 +171,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -154,6 +179,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -161,10 +187,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -188,7 +235,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -212,7 +260,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -314,11 +363,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -407,7 +466,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -531,7 +590,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
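
Note: the service.alpha.openshift.io/dependencies annotation added to the http and https services above is intended for the web console overview, which uses it to group the application with the database service it depends on. One quick way to confirm the annotation on a provisioned service (jws-app is the template's default APPLICATION_NAME; adjust if it was overridden):

    # Show the dependency annotation on the web service.
    oc get svc jws-app -o yaml | grep 'service.alpha.openshift.io/dependencies'
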
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index cd5bb9fa4..61a3208e4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -5,114 +5,134 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications built using S2I.",
- "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7 + PostgreSQL (Ephemeral with https)"
},
"name": "jws30-tomcat7-postgresql-s2i"
},
"labels": {
"template": "jws30-tomcat7-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -120,6 +140,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -127,6 +148,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -134,6 +156,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -141,6 +164,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +172,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -155,10 +180,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -182,7 +228,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -206,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -308,11 +356,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ "name": "jboss-webserver30-tomcat7-openshift:1.3"
}
}
},
@@ -401,7 +459,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -525,7 +583,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
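
Note: MAVEN_MIRROR_URL and ARTIFACT_DIR are now forwarded to the S2I build through sourceStrategy.env, so the builder image can pick them up at build time. A sketch for confirming what the generated BuildConfig exposes, assuming the default APPLICATION_NAME:

    # List build-time environment variables on the BuildConfig created by the template.
    oc set env bc/jws-app --list
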
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
index cb1e49d29..75d08e99d 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -6,46 +6,54 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat8,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 (no https)"
},
"name": "jws30-tomcat8-basic-s2i"
},
"labels": {
"template": "jws30-tomcat8-basic-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -53,6 +61,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -60,6 +69,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -67,6 +77,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -74,10 +85,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -156,11 +181,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -248,7 +283,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
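
Note: the probes above shell out to curl against localhost:8080, and adding --noproxy '*' keeps that request from being routed through any http_proxy/HTTP_PROXY set in the container environment, which would otherwise break the health check behind a proxy. The same check can be reproduced by hand against a running pod; <pod> is a placeholder, and the JWS_ADMIN_* variables expand inside the container, where the template sets them:

    # Re-run the manager status check from inside a pod; replace <pod> with a real pod name.
    oc rsh <pod> bash -c 'curl --noproxy "*" -s -u "$JWS_ADMIN_USERNAME:$JWS_ADMIN_PASSWORD" "http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName"'
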
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json
index 21d5662c7..71577bec4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -6,76 +6,89 @@
"iconClass": "icon-tomcat",
"description": "Application template for JWS applications built using S2I.",
"tags": "tomcat,tomcat8,java,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 (with https)"
},
"name": "jws30-tomcat8-https-s2i"
},
"labels": {
"template": "jws30-tomcat8-https-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "tomcat-websocket-chat",
"required": false
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -83,6 +96,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -233,11 +263,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -326,7 +366,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
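
Note: the builder reference moves from jboss-webserver30-tomcat8-openshift:1.2 to :1.3, so builds will fail if the 1.3 tag is not present on the ImageStream in ${IMAGE_STREAM_NAMESPACE}. A quick sanity check, assuming the default openshift namespace:

    # Confirm the 1.3 tag is listed on the Tomcat 8 builder image stream.
    oc describe is jboss-webserver30-tomcat8-openshift -n openshift
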
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 34657d826..de86dd83e 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -5,125 +5,147 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MongoDB (Persistent with https)"
},
"name": "jws30-tomcat8-mongodb-persistent-s2i"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"labels": {
"template": "jws30-tomcat8-mongodb-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -131,6 +153,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -138,6 +161,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -145,6 +169,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -152,6 +177,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -159,6 +185,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -166,6 +193,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -173,10 +201,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -200,7 +249,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -224,7 +274,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -326,11 +377,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -419,7 +480,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -547,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
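
Note: as the template message states, the https variants expect the "jws-service-account" service account and a secret holding the certificate and key to exist before deployment. A minimal sketch using the template defaults (secret name jws-app-secret, files server.crt and server.key in the current directory):

    # Pre-create the service account and certificate secret expected by the template.
    oc create serviceaccount jws-service-account
    oc create secret generic jws-app-secret --from-file=server.crt --from-file=server.key
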
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index 974cfaddb..6dc85e226 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -5,119 +5,140 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MongoDB applications built using S2I.",
- "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MongoDB (Ephemeral with https)"
},
"name": "jws30-tomcat8-mongodb-s2i"
},
"labels": {
"template": "jws30-tomcat8-mongodb-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-mongodb",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
"value": "",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MongoDB No Preallocation",
"description": "Disable data file preallocation.",
"name": "MONGODB_NOPREALLOC",
"required": false
},
{
+ "displayName": "MongoDB Small Files",
"description": "Set MongoDB to use a smaller default data file size.",
"name": "MONGODB_SMALLFILES",
"required": false
},
{
+ "displayName": "MongoDB Quiet",
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
"name": "MONGODB_QUIET",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -125,6 +146,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -132,6 +154,7 @@
"required": true
},
{
+ "displayName": "Database admin password",
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -139,6 +162,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -146,6 +170,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -153,6 +178,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -160,6 +186,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -167,10 +194,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
}
],
"objects": [
@@ -194,7 +242,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -218,7 +267,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
}
}
},
@@ -320,11 +370,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -413,7 +473,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -541,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mongodb:latest"
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
}
}
},
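
Note: the displayName fields added throughout these templates are the human-readable parameter labels surfaced by the web console; the parameter list itself (names, descriptions, defaults) can be reviewed without instantiating anything, assuming the template JSON is available locally:

    # List the template's parameter names, descriptions and default values.
    oc process --parameters -f jws30-tomcat8-mongodb-s2i.json
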
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index 7a8231cc5..0e96b58a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -5,135 +5,159 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MySQL (Persistent with https)"
},
"name": "jws30-tomcat8-mysql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat8-mysql-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -141,6 +165,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +173,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -155,6 +181,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -162,6 +189,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -169,6 +197,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -176,10 +205,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -203,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -227,7 +278,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -329,11 +381,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -422,7 +484,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -546,7 +608,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index cda21f237..08b040863 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -5,129 +5,152 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS MySQL applications built using S2I.",
- "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
- "version": "1.2.0"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + MySQL (Ephemeral with https)"
},
"name": "jws30-tomcat8-mysql-s2i"
},
"labels": {
"template": "jws30-tomcat8-mysql-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -135,6 +158,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -142,6 +166,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -149,6 +174,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -156,6 +182,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -163,6 +190,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -170,10 +198,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -197,7 +246,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -221,7 +271,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -323,11 +374,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -416,7 +477,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -540,7 +601,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index 4dfc98015..f117e6624 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -5,120 +5,141 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
- "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + PostgreSQL (Persistent with https)"
},
"name": "jws30-tomcat8-postgresql-persistent-s2i"
},
"labels": {
"template": "jws30-tomcat8-postgresql-persistent-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -126,6 +147,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -133,6 +155,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -140,6 +163,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -147,6 +171,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -154,6 +179,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -161,10 +187,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -188,7 +235,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -212,7 +260,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -314,11 +363,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -407,7 +466,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -531,7 +590,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index f6c85668c..faece1269 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -5,114 +5,134 @@
"annotations": {
"iconClass": "icon-tomcat",
"description": "Application template for JWS PostgreSQL applications built using S2I.",
- "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
- "version": "1.3.2"
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8 + (PostgreSQL with https)"
},
"name": "jws30-tomcat8-postgresql-s2i"
},
"labels": {
"template": "jws30-tomcat8-postgresql-s2i",
- "xpaas": "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "jws-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.2",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "todolist/todolist-jdbc",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
"name": "DB_JNDI",
"value": "jboss/datasources/defaultDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Secret Name",
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
"value": "jws-app-secret",
"required": true
},
{
+ "displayName": "Certificate Name",
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
"value": "server.crt",
"required": false
},
{
+ "displayName": "Certificate Key Name",
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
"value": "server.key",
"required": false
},
{
+ "displayName": "Certificate Password",
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -120,6 +140,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -127,6 +148,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Username",
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
@@ -134,6 +156,7 @@
"required": true
},
{
+ "displayName": "JWS Admin Password",
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -141,6 +164,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -148,6 +172,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -155,10 +180,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -182,7 +228,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -206,7 +253,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -308,11 +356,21 @@
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ "name": "jboss-webserver30-tomcat8-openshift:1.3"
}
}
},
@@ -400,7 +458,7 @@
"command": [
"/bin/bash",
"-c",
- "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
]
}
},
@@ -524,7 +582,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-basic-s2i.json
new file mode 100644
index 000000000..6db6e8cc6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-basic-s2i.json
@@ -0,0 +1,319 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 (no https)"
+ },
+ "name": "jws31-tomcat7-basic-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-https-s2i.json
new file mode 100644
index 000000000..fd5fca316
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-https-s2i.json
@@ -0,0 +1,438 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 (with https)"
+ },
+ "name": "jws31-tomcat7-https-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..6bbea8ab8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-persistent-s2i.json
@@ -0,0 +1,715 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MongoDB (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
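
The readiness probe in the web-server DeploymentConfig above shells out to curl and greps the Tomcat manager's jmxproxy output for a STARTED state. As a rough, assumption-laden sketch of the same check in Python (host, port and credentials here are placeholders, and this mirrors rather than replaces the probe command):

import re
import urllib.request

def tomcat_started(user, password, host="localhost", port=8080, timeout=5):
    # Query the Tomcat manager's jmxproxy for Catalina's stateName and report
    # ready only once it equals STARTED, like the curl | grep pipeline above.
    url = ("http://%s:%d/manager/jmxproxy/"
           "?get=Catalina%%3Atype%%3DServer&att=stateName" % (host, port))
    pwd_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    pwd_mgr.add_password(None, url, user, password)
    opener = urllib.request.build_opener(
        urllib.request.HTTPBasicAuthHandler(pwd_mgr),
        urllib.request.ProxyHandler({}))  # roughly equivalent to curl --noproxy '*'
    with opener.open(url, timeout=timeout) as resp:
        body = resp.read().decode("utf-8", "replace")
    return re.search(r"stateName\s*=\s*STARTED", body, re.IGNORECASE) is not None
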
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-s2i.json
new file mode 100644
index 000000000..a565ee4c0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mongodb-s2i.json
@@ -0,0 +1,674 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MongoDB (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
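
Several parameters in the template above (DB_USERNAME, DB_PASSWORD, the webhook secrets, and the JWS admin credentials) carry "generate": "expression" with a "from" pattern instead of a fixed value. A simplified stand-in for that generator, handling only the character classes these templates actually use, might look like this:

import random
import re
import string

# Map the character classes appearing in the "from" patterns to their alphabets.
_CLASSES = {"a-z": string.ascii_lowercase,
            "A-Z": string.ascii_uppercase,
            "0-9": string.digits}

def expand(pattern):
    # Expand patterns such as "user[a-zA-Z0-9]{3}" or "[a-zA-Z0-9]{8}" into
    # random strings; literal text outside the brackets is kept as-is.
    def repl(match):
        chars = "".join(_CLASSES[c] for c in re.findall(r"a-z|A-Z|0-9", match.group(1)))
        return "".join(random.choice(chars) for _ in range(int(match.group(2))))
    return re.sub(r"\[([^\]]+)\]\{(\d+)\}", repl, pattern)

print(expand("user[a-zA-Z0-9]{3}"))   # e.g. userQ7b
print(expand("[a-zA-Z0-9]{8}"))       # e.g. h2K9sPq1

This is only a sketch of the idea; the actual expression generator in OpenShift supports a richer pattern syntax.
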
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json
new file mode 100644
index 000000000..be6899958
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-persistent-s2i.json
@@ -0,0 +1,718 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MySQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
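
When a template such as jws31-tomcat7-mysql-persistent-s2i.json is processed (for example with oc process), every ${PARAMETER} reference in the objects list is resolved against the parameters list before the objects are created. A minimal sketch of that substitution step, assuming the JSON file is available locally; generated parameters are simply left at their defaults here, whereas the real processor fills them in first:

import json
import re

def process_template(path, overrides=None):
    # Seed values from each parameter's default "value", apply caller
    # overrides, then substitute ${NAME} references throughout the objects.
    with open(path) as f:
        template = json.load(f)
    values = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
    values.update(overrides or {})

    def substitute(node):
        if isinstance(node, dict):
            return {k: substitute(v) for k, v in node.items()}
        if isinstance(node, list):
            return [substitute(v) for v in node]
        if isinstance(node, str):
            return re.sub(r"\$\{(\w+)\}",
                          lambda m: values.get(m.group(1), m.group(0)), node)
        return node

    return substitute(template["objects"])

objects = process_template("jws31-tomcat7-mysql-persistent-s2i.json",
                           {"APPLICATION_NAME": "todo-app"})
print(json.dumps(objects[0]["metadata"], indent=2))
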
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-s2i.json
new file mode 100644
index 000000000..2983cc905
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-mysql-s2i.json
@@ -0,0 +1,677 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + MySQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
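
The template above is the ephemeral counterpart of the persistent MySQL template earlier in this change: it omits the PersistentVolumeClaim and the database volume mount, so MySQL data lives only as long as the pod. One quick way to see that difference, assuming both JSON files are on disk, is to compare the object kinds each template creates:

import json
from collections import Counter

def object_kinds(path):
    # Count the Kubernetes/OpenShift object kinds the template would create.
    with open(path) as f:
        return Counter(o["kind"] for o in json.load(f)["objects"])

ephemeral = object_kinds("jws31-tomcat7-mysql-s2i.json")
persistent = object_kinds("jws31-tomcat7-mysql-persistent-s2i.json")
print("ephemeral: ", dict(ephemeral))
print("persistent:", dict(persistent))
print("only in persistent:", dict(persistent - ephemeral))  # expect a PersistentVolumeClaim
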
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..cc5ea452c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-persistent-s2i.json
@@ -0,0 +1,692 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + PostgreSQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat7-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
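As its "message" field notes, the persistent Tomcat 7 + PostgreSQL template above expects the "jws-service-account" service account and a secret (default name "jws-app-secret") holding the HTTPS certificate and key to exist before it is instantiated. A minimal sketch with the oc CLI, assuming a current project, local server.crt/server.key files, and template defaults for any parameter not overridden (the parameter values shown are illustrative):

    # Prerequisites called out in the template's message field
    oc create serviceaccount jws-service-account
    oc create secret generic jws-app-secret \
        --from-file=server.crt=server.crt \
        --from-file=server.key=server.key

    # Instantiate the template; any parameter not passed with -p keeps the
    # default defined above
    oc new-app --template=jws31-tomcat7-postgresql-persistent-s2i \
        -p APPLICATION_NAME=jws-app \
        -p VOLUME_CAPACITY=1Gi
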
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-s2i.json
new file mode 100644
index 000000000..bd23e1558
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat7-postgresql-s2i.json
@@ -0,0 +1,651 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7 + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat7-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat7-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 7 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat7-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-basic-s2i.json
new file mode 100644
index 000000000..f3a5786f6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-basic-s2i.json
@@ -0,0 +1,319 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 (no https)"
+ },
+ "name": "jws31-tomcat8-basic-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
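The jws31-tomcat8-basic-s2i template above exposes only the http service and route and references no certificate secret or service account, so it can be instantiated without prerequisites. A hedged sketch using oc process against the file added in this diff (the APPLICATION_NAME value is illustrative; unset parameters keep the defaults listed above):

    # Render the template locally and create its objects in the current project
    oc process -f roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-basic-s2i.json \
        -p APPLICATION_NAME=tomcat8-demo \
      | oc create -f -
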
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-https-s2i.json
new file mode 100644
index 000000000..634948a80
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-https-s2i.json
@@ -0,0 +1,438 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 (with https)"
+ },
+ "name": "jws31-tomcat8-https-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
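For the https variants such as jws31-tomcat8-https-s2i above, the secret named by JWS_HTTPS_SECRET must hold the PEM files referenced by JWS_HTTPS_CERTIFICATE and JWS_HTTPS_CERTIFICATE_KEY (server.crt and server.key by default). A sketch for producing a throwaway self-signed pair for testing; the subject name is an assumed example, and real deployments should use properly issued certificates:

    # Illustrative self-signed certificate/key matching the template defaults
    openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
        -keyout server.key -out server.crt \
        -subj "/CN=secure-jws-app.example.com"
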
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..1ad60d8cc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-persistent-s2i.json
@@ -0,0 +1,715 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MongoDB (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-mongodb-persistent-s2i"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "labels": {
+ "template": "jws31-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-s2i.json
new file mode 100644
index 000000000..f3e918afc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mongodb-s2i.json
@@ -0,0 +1,674 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MongoDB (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat8-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using MongoDB) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MongoDB database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json
new file mode 100644
index 000000000..08b456440
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-persistent-s2i.json
@@ -0,0 +1,718 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MySQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-s2i.json
new file mode 100644
index 000000000..260515b73
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-mysql-s2i.json
@@ -0,0 +1,677 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + MySQL (Ephemeral with https)"
+ },
+ "name": "jws31-tomcat8-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using MySQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..eef5b6939
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-persistent-s2i.json
@@ -0,0 +1,692 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + PostgreSQL (Persistent with https)"
+ },
+ "name": "jws31-tomcat8-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
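Usage sketch (illustrative, not part of the diff): once a template such as the jws31-tomcat8-postgresql-persistent-s2i template added above is available as a local file, it would typically be instantiated with the oc client. The parameter names come from the "parameters" array above; the values shown here are assumptions chosen for illustration, not taken from the template defaults.

    oc process -f jws31-tomcat8-postgresql-persistent-s2i.json \
        -p APPLICATION_NAME=jws-app \
        -p DB_DATABASE=todolist \
        -p VOLUME_CAPACITY=1Gi \
      | oc create -f -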
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-s2i.json
new file mode 100644
index 000000000..07ef7218a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws31-tomcat8-postgresql-s2i.json
@@ -0,0 +1,649 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8 + (PostgreSQL with https)"
+ },
+ "name": "jws31-tomcat8-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws31-tomcat8-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new JWS application for Tomcat 8 (using PostgreSQL) has been created in your project. The username/password for administering your JWS is ${JWS_ADMIN_USERNAME}/${JWS_ADMIN_PASSWORD}. For accessing the PostgreSQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"jws-service-account\" service account and the secret named \"${JWS_HTTPS_SECRET}\" containing the ${JWS_HTTPS_CERTIFICATE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Secret Name",
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Certificate Name",
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Key Name",
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "displayName": "Certificate Password",
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Username",
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JWS Admin Password",
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver31-tomcat8-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl --noproxy '*' -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
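Prerequisite sketch (illustrative): the template messages above note that the "jws-service-account" service account and the secret named by JWS_HTTPS_SECRET (default "jws-app-secret"), containing the JWS_HTTPS_CERTIFICATE and JWS_HTTPS_CERTIFICATE_KEY files (defaults server.crt and server.key), must exist before deploying. Assuming those certificate files are already on disk in the current directory, creating the prerequisites could look like:

    oc create serviceaccount jws-service-account
    oc create secret generic jws-app-secret \
        --from-file=server.crt --from-file=server.key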
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json
index cd0bec3c1..cd0bec3c1 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json
index 2ecce08a9..2ecce08a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json
index d80939efb..d80939efb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json
index f99099868..f99099868 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json
index 143e16756..a48e204ae 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -6,13 +6,14 @@
"iconClass": "icon-jboss",
"description": "Application template for Java applications built using S2I.",
"tags": "java,xpaas",
- "version": "1.0.0"
+ "version": "1.1.0",
+ "openshift.io/display-name": "Red Hat OpenJDK 8"
},
"name": "openjdk18-web-basic-s2i"
},
"labels": {
"template": "openjdk18-web-basic-s2i",
- "xpaas": "1.0.0"
+ "xpaas": "1.4.0"
},
"message": "A new java application has been created in your project.",
"parameters": [
@@ -155,7 +156,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-openjdk18-openshift:1.0"
+ "name": "redhat-openjdk18-openshift:1.1"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
index 1dea463ac..d1705c88c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + MySQL (Persistent with https)"
},
"name": "processserver63-amq-mysql-persistent-s2i"
},
"labels": {
"template": "processserver63-amq-mysql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,144 +54,168 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -192,6 +223,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -199,46 +231,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -246,6 +287,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -253,18 +295,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -272,6 +317,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -279,10 +325,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -306,7 +373,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -330,7 +398,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -460,13 +529,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -788,7 +865,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -919,7 +996,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json
index 42264585b..665cb76a3 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + MySQL (Ephemeral with https)"
},
"name": "processserver63-amq-mysql-s2i"
},
"labels": {
"template": "processserver63-amq-mysql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,132 +54,154 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -180,6 +209,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -187,46 +217,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -234,6 +273,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -241,18 +281,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -260,6 +303,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -267,10 +311,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -294,7 +359,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -318,7 +384,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -448,13 +515,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -740,7 +815,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -837,7 +912,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
index f6d0c99ed..5a395a0f3 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + PostgreSQL (Persistent with https)"
},
"name": "processserver63-amq-postgresql-persistent-s2i"
},
"labels": {
"template": "processserver63-amq-postgresql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,144 +54,168 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "Split Data?",
"description": "Split the data directory for each node in a mesh.",
"name": "AMQ_SPLIT",
"value": "false",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -192,6 +223,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -199,31 +231,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -231,6 +269,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -238,18 +277,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -257,6 +299,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -264,10 +307,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -291,7 +355,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -315,7 +380,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -445,13 +511,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -773,7 +847,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -892,7 +966,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
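
The KIE_CONTAINER_DEPLOYMENT parameter documented in these templates uses the format containerId=groupId:artifactId:version|c2=g2:a2:v2. The sketch below parses that documented format for illustration only; it is not code from the process server image:

# Illustration of the documented format only; not code from the KIE server image.
def parse_kie_containers(value):
    """Parse 'containerId=groupId:artifactId:version|c2=g2:a2:v2' into a dict."""
    containers = {}
    for entry in value.split("|"):
        container_id, gav = entry.split("=", 1)
        group_id, artifact_id, version = gav.split(":")
        containers[container_id] = {
            "groupId": group_id,
            "artifactId": artifact_id,
            "version": version,
        }
    return containers

print(parse_kie_containers(
    "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final"
))
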
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
index 41c726cf0..e7c5efdc9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + A-MQ + PostgreSQL (Ephemeral with https)"
},
"name": "processserver63-amq-postgresql-s2i"
},
"labels": {
"template": "processserver63-amq-postgresql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,132 +54,154 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Request",
"description": "JNDI name of request queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_REQUEST",
"value": "queue/KIE.SERVER.REQUEST",
"required": false
},
{
+ "displayName": "KIE Server JMS Queues Response",
"description": "JNDI name of response queue for JMS.",
"name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
"value": "queue/KIE.SERVER.RESPONSE",
"required": false
},
{
+ "displayName": "KIE Server Executor JMS Queue",
"description": "JNDI name of executor queue for JMS.",
"name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
"value": "queue/KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "JMS Connection Factory JNDI Name",
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
"name": "MQ_JNDI",
"value": "java:/JmsXA",
"required": false
},
{
+ "displayName": "A-MQ Protocols",
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
"value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -180,6 +209,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -187,31 +217,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "A-MQ Username",
"description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -219,6 +255,7 @@
"required": false
},
{
+ "displayName": "A-MQ Password",
"description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -226,18 +263,21 @@
"required": false
},
{
+ "displayName": "A-MQ Mesh Discovery Type",
"description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
"name": "AMQ_MESH_DISCOVERY_TYPE",
"value": "kube",
"required": false
},
{
+ "displayName": "A-MQ Storage Limit",
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
"required": false
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -245,6 +285,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -252,10 +293,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -279,7 +341,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -303,7 +366,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
}
}
},
@@ -433,13 +497,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -725,7 +797,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
@@ -810,7 +882,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.3"
+ "name": "jboss-amq-62:1.4"
}
}
},
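
HOSTNAME_HTTP and HOSTNAME_HTTPS may be left blank, in which case the route falls back to the default hostname shape described in the parameter text above: <application-name>-<project>.<default-domain-suffix>, with a secure- prefix for the https route. A small sketch of that naming convention, with made-up application, project, and domain values:

# Made-up application, project, and domain values; only the naming convention is real.
def default_route_host(app, project, suffix, secure=False):
    host = "{0}-{1}.{2}".format(app, project, suffix)
    return "secure-" + host if secure else host

print(default_route_host("kie-app", "bpms-demo", "apps.example.com"))        # http route
print(default_route_host("kie-app", "bpms-demo", "apps.example.com", True))  # https route
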
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json
index 170c919cb..e70d20a6e 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json
@@ -5,29 +5,34 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,javaee,java,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server (no https)"
},
"name": "processserver63-basic-s2i"
},
"labels": {
"template": "processserver63-basic-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -35,54 +40,63 @@
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.H2Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -90,6 +104,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -97,6 +112,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -104,10 +120,24 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
}
],
"objects": [
@@ -190,13 +220,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
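
Several generated parameters above (database and broker credentials, webhook secrets, HORNETQ_CLUSTER_PASSWORD) declare a "from" expression such as [a-zA-Z0-9]{8}, so a random value is produced when none is supplied. The sketch below only illustrates the simple alphanumeric case and is not OpenShift's actual generator:

import random
import string

# Handles only the simple "[a-zA-Z0-9]{n}" case; OpenShift's generator supports the
# full expression syntax used above (e.g. "[a-zA-Z]{6}[0-9]{1}!").
def generate_alnum(length=8):
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))

print(generate_alnum())            # shape of a generated DB_PASSWORD-style value
print("user" + generate_alnum(3))  # shape of the "user[a-zA-Z0-9]{3}" username pattern
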
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
index 89d0db1a6..f76b07b0b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + MySQL (Persistent with https)"
},
"name": "processserver63-mysql-persistent-s2i"
},
"labels": {
"template": "processserver63-mysql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,108 +54,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,46 +189,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -210,6 +245,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -217,6 +253,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -224,10 +261,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -251,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -275,7 +334,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -381,13 +441,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -681,7 +749,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
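
The new MAVEN_MIRROR_URL, ARTIFACT_DIR, and MYSQL_IMAGE_STREAM_TAG parameters are resolved the same way as the existing ones: defaults from the parameters list, caller-supplied overrides on top, then ${NAME} substitution in the objects. A rough Python stand-in for that processing step, where the file name and override values are placeholders and generated ("from") parameters are not handled:

import json
import re

def process_template(path, overrides):
    """Rough stand-in for template processing: defaults + overrides, then ${NAME} substitution."""
    with open(path) as handle:
        template = json.load(handle)
    values = {p["name"]: p.get("value", "") for p in template["parameters"]}
    values.update(overrides)
    rendered = re.sub(
        r"\$\{(\w+)\}",
        lambda m: values.get(m.group(1), m.group(0)),
        json.dumps(template["objects"]),
    )
    return json.loads(rendered)

# Placeholder file name and values; a real mirror URL and project layout will differ.
objects = process_template(
    "processserver63-mysql-persistent-s2i.json",
    {"MAVEN_MIRROR_URL": "http://nexus.example.com/repository/maven-public/",
     "MYSQL_IMAGE_STREAM_TAG": "5.7"},
)
print(len(objects), "objects rendered")
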
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json
index 26cab29f8..a3be02eab 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + MySQL (Ephemeral with https)"
},
"name": "processserver63-mysql-s2i"
},
"labels": {
"template": "processserver63-mysql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,102 +54,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.MySQL5Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -150,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -157,46 +182,55 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -204,6 +238,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -211,6 +246,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -218,10 +254,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -245,7 +302,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -269,7 +327,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -375,13 +434,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -639,7 +706,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
index 32a512829..361b177f9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + PostgreSQL (Persistent with https)"
},
"name": "processserver63-postgresql-persistent-s2i"
},
"labels": {
"template": "processserver63-postgresql-persistent-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,108 +54,126 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -156,6 +181,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -163,31 +189,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -195,6 +227,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -202,6 +235,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -209,10 +243,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -236,7 +291,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -260,7 +316,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -366,13 +423,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -666,7 +731,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
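
Note on the hunk above: replacing "postgresql:latest" with "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}" pins the database image to the new POSTGRESQL_IMAGE_STREAM_TAG parameter (default 9.5). Placeholders of the form ${NAME} are resolved from the template's parameters list when the template is processed. A minimal sketch of that substitution in plain Python, assuming a local copy of the persistent PostgreSQL template saved as processserver63-postgresql-persistent-s2i.json (the filename is an assumption, and this is illustrative only rather than the oc client):

    import json
    from string import Template

    # Assumed local copy of the template shown in the diff above.
    with open("processserver63-postgresql-persistent-s2i.json") as f:
        tpl = json.load(f)

    # Defaults come from each parameter's "value"; generated parameters have none,
    # so safe_substitute leaves their ${NAME} placeholders untouched.
    defaults = {p["name"]: p["value"] for p in tpl["parameters"] if "value" in p}

    # The pinned image stream tag from the hunk above resolves to the 9.5 default.
    print(Template("postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}").safe_substitute(defaults))
    # -> postgresql:9.5
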
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json
index 55e2199bb..451915a1d 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json
@@ -5,41 +5,48 @@
"annotations": {
"description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
- "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.3"
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server + PostgreSQL (Ephemeral with https)"
},
"name": "processserver63-postgresql-s2i"
},
"labels": {
"template": "processserver63-postgresql-s2i",
- "xpaas": "1.3.3"
+ "xpaas": "1.4.0"
},
+ "message": "A new BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
"parameters": [
{
+ "displayName": "KIE Container Deployment",
"description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
"required": false
},
{
+ "displayName": "KIE Server Protocol",
"description": "The protocol to access the KIE Server REST interface.",
"name": "KIE_SERVER_PROTOCOL",
"value": "https",
"required": false
},
{
+ "displayName": "KIE Server Port",
"description": "The port to access the KIE Server REST interface.",
"name": "KIE_SERVER_PORT",
"value": "8443",
"required": false
},
{
+ "displayName": "KIE Server Username",
"description": "The user name to access the KIE Server REST or JMS interface.",
"name": "KIE_SERVER_USER",
"value": "kieserver",
"required": false
},
{
+ "displayName": "KIE Server Password",
"description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
"name": "KIE_SERVER_PASSWORD",
"from": "[a-zA-Z]{6}[0-9]{1}!",
@@ -47,102 +54,119 @@
"required": false
},
{
+ "displayName": "KIE Server Domain",
"description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
"name": "KIE_SERVER_DOMAIN",
"value": "other",
"required": false
},
{
+ "displayName": "KIE Server Persistence Dialect",
"description": "Hibernate persistence dialect.",
"name": "KIE_SERVER_PERSISTENCE_DIALECT",
"value": "org.hibernate.dialect.PostgreSQL82Dialect",
"required": false
},
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "kie-app",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Git Repository URL",
"description": "Git source URI for application",
"name": "SOURCE_REPOSITORY_URL",
"value": "https://github.com/jboss-openshift/openshift-quickstarts",
"required": true
},
{
+ "displayName": "Git Reference",
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
"value": "1.3",
"required": false
},
{
+ "displayName": "Context Directory",
"description": "Path within Git project to build; empty for root project directory.",
"name": "CONTEXT_DIR",
"value": "processserver/library",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
"name": "DB_JNDI",
"value": "java:jboss/datasources/ExampleDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Queues",
"description": "Queue names",
"name": "HORNETQ_QUEUES",
"value": "",
"required": false
},
{
+ "displayName": "Topics",
"description": "Topic names",
"name": "HORNETQ_TOPICS",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "processserver-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate",
"name": "HTTPS_NAME",
"value": "jboss",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate",
"name": "HTTPS_PASSWORD",
"value": "mykeystorepass",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -150,6 +174,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -157,31 +182,37 @@
"required": true
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "HornetQ Password",
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -189,6 +220,7 @@
"required": true
},
{
+ "displayName": "Github Webhook Secret",
"description": "GitHub trigger secret",
"name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -196,6 +228,7 @@
"required": true
},
{
+ "displayName": "Generic Webhook Secret",
"description": "Generic build trigger secret",
"name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
@@ -203,10 +236,31 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -230,7 +284,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -254,7 +309,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -360,13 +416,21 @@
{
"name": "KIE_CONTAINER_DEPLOYMENT",
"value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
}
],
"forcePull": true,
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-processserver63-openshift:1.3"
+ "name": "jboss-processserver63-openshift:1.4"
}
}
},
@@ -624,7 +688,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
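
Both templates above gain a service.alpha.openshift.io/dependencies annotation on their web services, pointing the console at the ${APPLICATION_NAME}-postgresql Service. The annotation value is a JSON array serialized into a string, which is why the diff shows escaped quotes. A small sketch in plain Python of producing and reading such a value (the service name below assumes the default APPLICATION_NAME of kie-app):

    import json

    # Dependencies this service declares on other services created by the template.
    deps = [{"name": "kie-app-postgresql", "kind": "Service"}]

    # Annotation values are plain strings, so the array is serialized with json.dumps;
    # embedding that string in the template JSON yields the escaped quotes seen above.
    annotation = json.dumps(deps)
    print(annotation)   # [{"name": "kie-app-postgresql", "kind": "Service"}]

    # Tooling reads it back with the inverse operation.
    parsed = json.loads(annotation)
    assert parsed[0]["kind"] == "Service"
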
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json
new file mode 100644
index 000000000..293d04d63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-persistent-s2i.json
@@ -0,0 +1,1156 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + MySQL (Persistent with https)"
+ },
+ "name": "processserver64-amq-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
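
Several parameters in the new template (for example DB_USERNAME, DB_PASSWORD, MQ_PASSWORD and the webhook secrets) carry "generate": "expression" with a "from" pattern instead of a fixed value; when the template is processed, OpenShift fills each one with a random string matching that pattern. A rough illustration in plain Python (not OpenShift's actual generator), covering only the pattern subset used here, namely literal text plus [class]{n} repetitions:

    import re
    import secrets

    def expand_class(body):
        # Expand a character-class body such as "a-zA-Z0-9" into the characters it matches.
        chars, i = [], 0
        while i < len(body):
            if i + 2 < len(body) and body[i + 1] == "-":
                chars.extend(chr(c) for c in range(ord(body[i]), ord(body[i + 2]) + 1))
                i += 3
            else:
                chars.append(body[i])
                i += 1
        return "".join(chars)

    def generate(expression):
        # Handle literals plus [class]{n} repetitions, e.g. "user[a-zA-Z0-9]{3}".
        out = []
        for literal, cls, count in re.findall(r"([^\[]*)(?:\[([^\]]+)\]\{(\d+)\})?", expression):
            out.append(literal)
            if cls:
                alphabet = expand_class(cls)
                out.append("".join(secrets.choice(alphabet) for _ in range(int(count))))
        return "".join(out)

    print(generate("user[a-zA-Z0-9]{3}"))     # e.g. userQ4x  (DB_USERNAME, MQ_USERNAME)
    print(generate("[a-zA-Z0-9]{8}"))         # e.g. d41Gp0Zx (DB_PASSWORD, webhook secrets)
    print(generate("[a-zA-Z]{6}[0-9]{1}!"))   # e.g. qWhzTe7! (KIE_SERVER_PASSWORD)
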
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-s2i.json
new file mode 100644
index 000000000..760940b36
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-mysql-s2i.json
@@ -0,0 +1,1034 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + MySQL (Ephemeral with https)"
+ },
+ "name": "processserver64-amq-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using MySQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
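
Everything in the template above is wired together through ${PARAMETER} references: each object in "objects" points back at an entry in "parameters", and parameters marked "generate": "expression" (the webhook secrets and the generated passwords) are filled in from their "from" pattern when the template is processed. The real substitution is performed by `oc process` / the OpenShift template API on the cluster side; the short Python sketch below only illustrates that mechanism, and the local file name it reads is a placeholder, not the template's actual path in this repository.

import json
import random
import re
import string

def generate_from(pattern):
    # Very rough stand-in for OpenShift's "generate": "expression" handling;
    # it only understands the "[a-zA-Z0-9]{N}" patterns used in this template
    # and falls back to 8 random characters for anything else.
    m = re.fullmatch(r"\[a-zA-Z0-9\]\{(\d+)\}", pattern)
    n = int(m.group(1)) if m else 8
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(n))

def process_template(path, overrides=None):
    # Resolve every parameter (override > generated > default), then replace
    # the ${NAME} placeholders throughout the object list.
    with open(path) as f:
        template = json.load(f)
    values = {}
    for param in template.get("parameters", []):
        name = param["name"]
        if overrides and name in overrides:
            values[name] = overrides[name]
        elif param.get("generate") == "expression":
            values[name] = generate_from(param.get("from", ""))
        else:
            values[name] = param.get("value", "")
    rendered = json.dumps(template["objects"])
    for name, value in values.items():
        rendered = rendered.replace("${%s}" % name, value)
    return json.loads(rendered)

if __name__ == "__main__":
    # "processserver-template.json" is a placeholder for a locally saved copy.
    objects = process_template("processserver-template.json",
                               overrides={"APPLICATION_NAME": "kie-app"})
    print(len(objects), "objects rendered")
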
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..1603bccff
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-persistent-s2i.json
@@ -0,0 +1,1126 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + PostgreSQL (Persistent with https)"
+ },
+ "name": "processserver64-amq-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
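
The template message above asks the operator to pre-create the "processserver-service-account" service account and the keystore secret named by HTTPS_SECRET, and the AMQ_MESH_DISCOVERY_TYPE description notes that 'kube' discovery requires the 'view' role on the pod's service account. The Python sketch below strings those preparation steps together by shelling out to standard `oc` subcommands; the project name and the local keystore path are placeholders, and exact flags can vary between oc client versions.

import subprocess

def run(cmd):
    # Echo each oc invocation, then fail fast if it returns non-zero.
    print("running:", " ".join(cmd))
    subprocess.run(cmd, check=True)

# Service account referenced by serviceAccountName in the application DC.
run(["oc", "create", "serviceaccount", "processserver-service-account"])

# Keystore secret mounted through processserver-keystore-volume; the secret
# and key names match the HTTPS_SECRET / HTTPS_KEYSTORE defaults, while the
# local ./keystore.jks path is an assumption.
run(["oc", "create", "secret", "generic", "processserver-app-secret",
     "--from-file=keystore.jks=./keystore.jks"])

# Needed when AMQ_MESH_DISCOVERY_TYPE=kube (quoting the parameter description);
# "myproject" is a placeholder for the actual project namespace.
run(["oc", "policy", "add-role-to-user", "view",
     "system:serviceaccount:myproject:default"])

# Finally instantiate the template itself.
run(["oc", "new-app", "-f", "processserver64-amq-postgresql-persistent-s2i.json",
     "-p", "APPLICATION_NAME=kie-app"])
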
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-s2i.json
new file mode 100644
index 000000000..422f51c11
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-amq-postgresql-s2i.json
@@ -0,0 +1,1004 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server AMQ and PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + A-MQ + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "processserver64-amq-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-amq-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using PostgreSQL and A-MQ) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. And for the A-MQ service use the credentials ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Request",
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server JMS Queues Response",
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Executor JMS Queue",
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"},{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-63",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
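
The env entries in the deployment above wire the process server to its A-MQ and PostgreSQL companions through prefix-mapping strings such as MQ_SERVICE_PREFIX_MAPPING ("${APPLICATION_NAME}-amq=MQ") and DB_SERVICE_PREFIX_MAPPING ("${APPLICATION_NAME}-postgresql=DB"): each comma-separated entry names a service and the environment-variable prefix its settings are exposed under. The Python sketch below only illustrates that naming convention; it is not the startup logic of the process server image.

def parse_prefix_mapping(mapping):
    """Split 'svc1=PREFIX1,svc2=PREFIX2' into [('svc1', 'PREFIX1'), ('svc2', 'PREFIX2')].

    Illustrative only -- approximates how a DB_SERVICE_PREFIX_MAPPING or
    MQ_SERVICE_PREFIX_MAPPING value could be interpreted; the real image
    scripts are not reproduced here.
    """
    pairs = []
    for entry in filter(None, (part.strip() for part in mapping.split(","))):
        service, _, prefix = entry.partition("=")
        pairs.append((service, prefix))
    return pairs


if __name__ == "__main__":
    # Values from the template above, with APPLICATION_NAME resolved to "kie-app"
    # (the default used by the related templates below).
    print(parse_prefix_mapping("kie-app-amq=MQ"))          # [('kie-app-amq', 'MQ')]
    print(parse_prefix_mapping("kie-app-postgresql=DB"))   # [('kie-app-postgresql', 'DB')]
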
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-basic-s2i.json
new file mode 100644
index 000000000..2bf15ff25
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-basic-s2i.json
@@ -0,0 +1,383 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server (no https)"
+ },
+ "name": "processserver64-basic-s2i"
+ },
+ "labels": {
+ "template": "processserver64-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.H2Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
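
Several parameters in the template above (KIE_SERVER_PASSWORD, HORNETQ_CLUSTER_PASSWORD and the two webhook secrets) are declared with "generate": "expression" and a "from" pattern such as "[a-zA-Z0-9]{8}". The toy expander below only illustrates that bracket-and-count convention for the patterns that actually appear in these templates; it is not OpenShift's expression generator.

import random
import re

# Matches either "[charset]{count}" or a single literal character.
TOKEN = re.compile(r"\[([^\]]+)\]\{(\d+)\}|(.)")

def char_pool(spec):
    """Expand a bracket body such as 'a-zA-Z0-9' into its concrete character pool."""
    pool, i = [], 0
    while i < len(spec):
        if i + 2 < len(spec) and spec[i + 1] == "-":
            pool.extend(chr(c) for c in range(ord(spec[i]), ord(spec[i + 2]) + 1))
            i += 3
        else:
            pool.append(spec[i])
            i += 1
    return "".join(pool)

def expand(pattern):
    """Toy expansion of the simple 'from' patterns used in these templates."""
    out = []
    for spec, count, literal in TOKEN.findall(pattern):
        if literal:
            out.append(literal)  # literal characters (e.g. the trailing '!' or the 'user' prefix)
        else:
            pool = char_pool(spec)
            out.append("".join(random.choice(pool) for _ in range(int(count))))
    return "".join(out)

if __name__ == "__main__":
    print(expand("[a-zA-Z]{6}[0-9]{1}!"))   # shape of the generated KIE_SERVER_PASSWORD
    print(expand("user[a-zA-Z0-9]{3}"))     # shape of the generated DB_USERNAME
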
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-persistent-s2i.json
new file mode 100644
index 000000000..4673dfb0d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-persistent-s2i.json
@@ -0,0 +1,860 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + MySQL (Persistent with https)"
+ },
+ "name": "processserver64-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
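
Every object above refers to its settings through ${PARAMETER} placeholders that are resolved against the "parameters" list when the template is processed (for example by `oc process` or the web console). The sketch below is a minimal stand-in for that substitution step, assuming the template JSON has been saved locally; it performs plain string replacement only and does not generate expression parameters or validate required ones.

import json
import re

PLACEHOLDER = re.compile(r"\$\{([A-Za-z0-9_]+)\}")

def resolve(value, params):
    """Recursively replace ${NAME} placeholders in strings, lists and dicts."""
    if isinstance(value, str):
        return PLACEHOLDER.sub(lambda m: params.get(m.group(1), m.group(0)), value)
    if isinstance(value, list):
        return [resolve(v, params) for v in value]
    if isinstance(value, dict):
        return {k: resolve(v, params) for k, v in value.items()}
    return value

def process_template(template, overrides):
    # Start from the defaults declared under "parameters", then apply user overrides.
    params = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
    params.update(overrides)
    return [resolve(obj, params) for obj in template.get("objects", [])]

if __name__ == "__main__":
    with open("processserver64-mysql-persistent-s2i.json") as f:
        tmpl = json.load(f)
    objects = process_template(tmpl, {"APPLICATION_NAME": "kie-app"})
    print(json.dumps(objects[0]["metadata"], indent=2))
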
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-s2i.json
new file mode 100644
index 000000000..9078f20b8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-mysql-s2i.json
@@ -0,0 +1,783 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + MySQL (Ephemeral with https)"
+ },
+ "name": "processserver64-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using MySQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
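
All of these templates document the same KIE_CONTAINER_DEPLOYMENT format, "containerId=groupId:artifactId:version|c2=g2:a2:v2". The helper below is only a hypothetical sanity check written to illustrate that format; nothing in the templates or in the KIE server depends on it.

def parse_kie_container_deployment(value):
    """Return [(container_id, (group, artifact, version)), ...] or raise ValueError.

    Hypothetical helper illustrating the documented
    'containerId=groupId:artifactId:version|c2=g2:a2:v2' format.
    """
    containers = []
    for entry in value.split("|"):
        container_id, sep, gav = entry.partition("=")
        if not sep or not container_id:
            raise ValueError("missing '=' in entry: %r" % entry)
        parts = gav.split(":")
        if len(parts) != 3 or not all(parts):
            raise ValueError("GAV must be groupId:artifactId:version, got: %r" % gav)
        containers.append((container_id, tuple(parts)))
    return containers

if __name__ == "__main__":
    # Default value from the templates above.
    default = "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final"
    print(parse_kie_container_deployment(default))
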
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..75b6d310e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-persistent-s2i.json
@@ -0,0 +1,830 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + PostgreSQL (Persistent with https)"
+ },
+ "name": "processserver64-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver64-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
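The parameters above that set "generate": "expression" (DB_USERNAME, DB_PASSWORD, HORNETQ_CLUSTER_PASSWORD, and both webhook secrets) carry no fixed value; the template processor synthesizes one from the accompanying "from" pattern when the template is instantiated. As a rough, unofficial sketch only — OpenShift's actual generator supports a richer syntax than this — expanding the simple [class]{n} patterns used in these templates might look like:

```python
import re
import secrets
import string

def expand_expression(pattern):
    """Sketch of how a 'generate: expression' value such as "user[a-zA-Z0-9]{3}"
    or "[a-zA-Z0-9]{8}" could be expanded into a concrete value.
    Only literal text plus [class]{n} groups are handled here."""
    classes = {"a-z": string.ascii_lowercase,
               "A-Z": string.ascii_uppercase,
               "0-9": string.digits}

    def expand_class(spec, count):
        # Build the alphabet from the ranges named in the class, then draw randomly.
        alphabet = "".join(chars for rng, chars in classes.items() if rng in spec)
        return "".join(secrets.choice(alphabet) for _ in range(count))

    out, pos = [], 0
    for m in re.finditer(r"\[([^\]]+)\]\{(\d+)\}", pattern):
        out.append(pattern[pos:m.start()])          # literal prefix, e.g. "user"
        out.append(expand_class(m.group(1), int(m.group(2))))
        pos = m.end()
    out.append(pattern[pos:])                       # literal suffix, e.g. "!"
    return "".join(out)

print(expand_expression("user[a-zA-Z0-9]{3}"))   # e.g. "user7Kq"
print(expand_expression("[a-zA-Z0-9]{8}"))       # e.g. "Zp04mTqa"
```

The sample outputs are illustrative; the real generator is what produces the credentials echoed back in the template's message text.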
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-s2i.json
new file mode 100644
index 000000000..51923c0ad
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver64-postgresql-s2i.json
@@ -0,0 +1,753 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.4 intelligent process server PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "processserver64-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver64-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new BPMS application (using PostgreSQL) has been created in your project. The username/password for accessing the KIE Server REST or JMS interface is ${KIE_SERVER_USER}/${KIE_SERVER_PASSWORD}. For accessing the MySQL database \"${DB_DATABASE}\" use the credentials ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"processserver-service-account\" service account and the secret named \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content.",
+ "parameters": [
+ {
+ "displayName": "KIE Container Deployment",
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Protocol",
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Port",
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Username",
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Password",
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Domain",
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "displayName": "KIE Server Persistence Dialect",
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver64-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
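Every string field in the objects above can reference a parameter as ${NAME}; oc process resolves those references against the parameter defaults (plus any -p NAME=value overrides) before the objects are created in the project. Purely as an illustration of that substitution pass — not OpenShift's implementation, and with hypothetical override values — a minimal walk over the file added in this hunk could look like:

```python
import json
import re

def process_template(path, overrides=None):
    """Rough sketch of template parameter substitution: resolve each ${NAME}
    reference from the template's parameter defaults or caller overrides."""
    with open(path) as f:
        template = json.load(f)

    values = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
    values.update(overrides or {})

    def substitute(text):
        # Replace ${NAME} with its resolved value; leave unknown names untouched.
        return re.sub(r"\$\{(\w+)\}", lambda m: values.get(m.group(1), m.group(0)), text)

    def walk(node):
        if isinstance(node, dict):
            return {k: walk(v) for k, v in node.items()}
        if isinstance(node, list):
            return [walk(v) for v in node]
        return substitute(node) if isinstance(node, str) else node

    return [walk(obj) for obj in template.get("objects", [])]

# Hypothetical invocation against the file introduced above.
objects = process_template(
    "roles/openshift_examples/files/examples/v3.6/xpaas-templates/"
    "processserver64-postgresql-s2i.json",
    overrides={"APPLICATION_NAME": "kie-app", "HOSTNAME_HTTP": "kie.example.com"},
)
print(json.dumps(objects[0], indent=2))  # first object: the http Service
```

Parameters that are generated rather than defaulted (the passwords and webhook secrets) come back empty from this sketch; the real processor generates them from their "from" expressions as described earlier.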
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json
index 8b3cd6ed0..8b3cd6ed0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json
index bc5bbad22..bc5bbad22 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json
index e54fa0d59..e54fa0d59 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json
index 20ba97dac..20ba97dac 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
index 555647fab..555647fab 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json
index cf9a4e903..cf9a4e903 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json
index c78a96f7c..c78a96f7c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json
index 620425902..620425902 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
index 15cfc93fd..15cfc93fd 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
index c70ee7726..c70ee7726 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json
index fb0578a67..5e956f449 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json
@@ -5,110 +5,129 @@
"annotations": {
"description": "Application template for SSO 7.0",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,java,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0"
},
"name": "sso70-https"
},
"labels": {
"template": "sso70-https",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -116,54 +135,65 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
@@ -283,10 +313,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json
index dcbb24bf1..0fb2703c7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json
@@ -5,123 +5,145 @@
"annotations": {
"description": "Application template for SSO 7.0 MySQL applications with persistent storage",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + MySQL (Persistent)"
},
"name": "sso70-mysql-persistent"
},
"labels": {
"template": "sso70-mysql-persistent",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -129,6 +151,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -136,36 +159,42 @@
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -173,58 +202,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -248,7 +295,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -272,7 +320,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -364,10 +413,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -639,7 +688,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json
index 1768f7a1b..9beae806b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json
@@ -5,123 +5,145 @@
"annotations": {
"description": "Application template for SSO 7.0 MySQL applications",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + MySQL (Ephemeral)"
},
"name": "sso70-mysql"
},
"labels": {
"template": "sso70-mysql",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "MySQL Lower Case Table Names",
"description": "Sets how the table names are stored and compared.",
"name": "MYSQL_LOWER_CASE_TABLE_NAMES",
"required": false
},
{
+ "displayName": "MySQL Maximum number of connections",
"description": "The maximum permitted number of simultaneous client connections.",
"name": "MYSQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "MySQL FullText Minimum Word Length",
"description": "The minimum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MIN_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL FullText Maximum Word Length",
"description": "The maximum length of the word to be included in a FULLTEXT index.",
"name": "MYSQL_FT_MAX_WORD_LEN",
"required": false
},
{
+ "displayName": "MySQL AIO",
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
"name": "MYSQL_AIO",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -129,6 +151,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -136,30 +159,35 @@
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -167,58 +195,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
}
],
"objects": [
@@ -240,10 +286,11 @@
"name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}",
- "component": "server"
+ "component": "server"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -268,7 +315,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,7 +397,7 @@
"name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}",
- "component": "server"
+ "component": "server"
}
},
"spec": {
@@ -364,10 +412,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -641,7 +689,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "mysql:latest"
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
}
}
},
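The hunks above replace the hard-coded mysql:latest reference with mysql:${MYSQL_IMAGE_STREAM_TAG} and turn SSO_ADMIN_USERNAME/SSO_ADMIN_PASSWORD into generated parameters. The sketch below is a minimal, simplified illustration of how such "generate": "expression" parameters and ${NAME} references resolve when a template is processed; it is not the actual OpenShift template processor, and the file path and override value are assumptions.

    import json
    import random
    import re
    import string

    def generate_from_expression(expr):
        # Simplified generator covering only the patterns used in these templates:
        # an optional literal prefix plus a single [a-zA-Z0-9]{N} group,
        # e.g. "user[a-zA-Z0-9]{3}" or "[a-zA-Z0-9]{8}".
        match = re.fullmatch(r'([A-Za-z0-9]*)\[a-zA-Z0-9\]\{(\d+)\}', expr)
        if not match:
            raise ValueError('unsupported expression: %s' % expr)
        prefix, length = match.group(1), int(match.group(2))
        alphabet = string.ascii_letters + string.digits
        return prefix + ''.join(random.choice(alphabet) for _ in range(length))

    def process_template(path, overrides=None):
        # Resolve each parameter (override > generated > declared value), then
        # substitute ${NAME} references throughout the serialized objects list.
        # Values are assumed to be plain alphanumerics, so no JSON re-escaping is done.
        overrides = overrides or {}
        with open(path) as handle:
            template = json.load(handle)
        values = {}
        for param in template.get('parameters', []):
            name = param['name']
            if name in overrides:
                values[name] = overrides[name]
            elif param.get('generate') == 'expression':
                values[name] = generate_from_expression(param['from'])
            else:
                values[name] = param.get('value', '')
        rendered = json.dumps(template.get('objects', []))
        for name, value in values.items():
            rendered = rendered.replace('${%s}' % name, value)
        return json.loads(rendered)

    # Example, assuming a local copy of the template file:
    # objects = process_template('sso70-mysql.json', {'MYSQL_IMAGE_STREAM_TAG': '5.7'})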
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json
index 4c2f81f2e..e22399351 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json
@@ -5,108 +5,127 @@
"annotations": {
"description": "Application template for SSO 7.0 PostgreSQL applications with persistent storage",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + PostgreSQL (Persistent)"
},
"name": "sso70-postgresql-persistent"
},
"labels": {
"template": "sso70-postgresql-persistent",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -114,6 +133,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -121,36 +141,42 @@
"required": true
},
{
+ "displayName": "Database Volume Capacity",
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
"value": "512Mi",
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -158,58 +184,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -233,7 +277,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -257,7 +302,8 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,10 +395,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -624,7 +670,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json
index d8402ef72..aa8ebaa8e 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json
@@ -5,108 +5,127 @@
"annotations": {
"description": "Application template for SSO 7.0 PostgreSQL applications",
"iconClass" : "icon-jboss",
- "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
- "version" : "1.3.2"
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.0 + PostgreSQL (Ephemeral)"
},
"name": "sso70-postgresql"
},
"labels": {
"template": "sso70-postgresql",
- "xpaas" : "1.3.2"
+ "xpaas": "1.4.0"
},
+ "message": "A new SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
"parameters": [
{
+ "displayName": "Application Name",
"description": "The name for the application.",
"name": "APPLICATION_NAME",
"value": "sso",
"required": true
},
{
+ "displayName": "Custom http Route Hostname",
"description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTP",
"value": "",
"required": false
},
{
+ "displayName": "Custom https Route Hostname",
"description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
"name": "HOSTNAME_HTTPS",
"value": "",
"required": false
},
{
+ "displayName": "Database JNDI Name",
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
"value": "java:jboss/datasources/KeycloakDS",
"required": false
},
{
+ "displayName": "Database Name",
"description": "Database name",
"name": "DB_DATABASE",
"value": "root",
"required": true
},
{
+ "displayName": "Service Account Name",
"description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
"name": "SERVICE_ACCOUNT_NAME",
"value": "sso-service-account",
"required": true
},
{
+ "displayName": "Server Keystore Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "HTTPS_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "Server Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "HTTPS_KEYSTORE",
"value": "keystore.jks",
"required": false
},
{
+ "displayName": "Server Keystore Type",
"description": "The type of the keystore file (JKS or JCEKS)",
"name": "HTTPS_KEYSTORE_TYPE",
"value": "",
"required": false
},
{
+ "displayName": "Server Certificate Name",
"description": "The name associated with the server certificate (e.g. jboss)",
"name": "HTTPS_NAME",
"value": "",
"required": false
},
{
+ "displayName": "Server Keystore Password",
"description": "The password for the keystore and certificate (e.g. mykeystorepass)",
"name": "HTTPS_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "Datasource Minimum Pool Size",
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
"name": "DB_MIN_POOL_SIZE",
- "required": false
+ "required": false
},
{
+ "displayName": "Datasource Maximum Pool Size",
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
"name": "DB_MAX_POOL_SIZE",
"required": false
},
{
+ "displayName": "Datasource Transaction Isolation",
"description": "Sets transaction-isolation for the configured datasource.",
"name": "DB_TX_ISOLATION",
"required": false
},
{
+ "displayName": "PostgreSQL Maximum number of connections",
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
"name": "POSTGRESQL_MAX_CONNECTIONS",
"required": false
},
{
+ "displayName": "PostgreSQL Shared Buffers",
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
"name": "POSTGRESQL_SHARED_BUFFERS",
"required": false
},
{
+ "displayName": "Database Username",
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
@@ -114,6 +133,7 @@
"required": true
},
{
+ "displayName": "Database Password",
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -121,30 +141,35 @@
"required": true
},
{
+ "displayName": "JGroups Secret Name",
"description": "The name of the secret containing the keystore file",
"name": "JGROUPS_ENCRYPT_SECRET",
"value": "sso-app-secret",
"required": false
},
{
+ "displayName": "JGroups Keystore Filename",
"description": "The name of the keystore file within the secret",
"name": "JGROUPS_ENCRYPT_KEYSTORE",
"value": "jgroups.jceks",
"required": false
},
{
+ "displayName": "JGroups Certificate Name",
"description": "The name associated with the server certificate (e.g. secret-key)",
"name": "JGROUPS_ENCRYPT_NAME",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Keystore Password",
"description": "The password for the keystore and certificate (e.g. password)",
"name": "JGROUPS_ENCRYPT_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "JGroups Cluster Password",
"description": "JGroups cluster password",
"name": "JGROUPS_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
@@ -152,58 +177,76 @@
"required": true
},
{
+ "displayName": "ImageStream Namespace",
"description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
"name": "IMAGE_STREAM_NAMESPACE",
"value": "openshift",
"required": true
},
{
+ "displayName": "SSO Admin Username",
"description": "SSO Server admin username",
"name": "SSO_ADMIN_USERNAME",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Admin Password",
"description": "SSO Server admin password",
"name": "SSO_ADMIN_PASSWORD",
- "value": "admin",
- "required": false
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
},
{
+ "displayName": "SSO Realm",
"description": "Realm to be created in the SSO server (e.g. demo).",
"name": "SSO_REALM",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Username",
"description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
"name": "SSO_SERVICE_USERNAME",
"value": "",
"required": false
},
{
+ "displayName": "SSO Service Password",
"description": "The password for the SSO service user.",
"name": "SSO_SERVICE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store",
"description": "The name of the truststore file within the secret (e.g. truststore.jks)",
"name": "SSO_TRUSTSTORE",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Password",
"description": "The password for the truststore and certificate (e.g. mykeystorepass)",
"name": "SSO_TRUSTSTORE_PASSWORD",
"value": "",
"required": false
},
{
+ "displayName": "SSO Trust Store Secret",
"description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
"name": "SSO_TRUSTSTORE_SECRET",
"value": "sso-app-secret",
"required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
}
],
"objects": [
@@ -228,7 +271,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -253,7 +297,8 @@
"component": "server"
},
"annotations": {
- "description": "The web server's https port."
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
}
}
},
@@ -349,10 +394,10 @@
"containerNames": [
"${APPLICATION_NAME}"
],
- "from": {
+ "from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "redhat-sso70-openshift:1.3"
+ "name": "redhat-sso70-openshift:1.4"
}
}
},
@@ -626,7 +671,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "postgresql:latest"
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
}
}
},
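The service.alpha.openshift.io/dependencies annotation added to the http and https services above is a JSON array serialized into a plain string value, which is why its quotes are escaped in the template source. A small sketch of building that value (the helper name and defaults are illustrative, not part of the templates):

    import json

    def dependencies_annotation(app_name, backend='postgresql'):
        # The annotation value is itself JSON: a list of {name, kind} references
        # to the services the web server depends on.
        return json.dumps([{'name': '%s-%s' % (app_name, backend), 'kind': 'Service'}])

    # dependencies_annotation('sso') -> '[{"name": "sso-postgresql", "kind": "Service"}]'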
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-https.json
new file mode 100644
index 000000000..bee86d7c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-https.json
@@ -0,0 +1,544 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "name": "sso71-https"
+ },
+ "labels": {
+ "template": "sso71-https",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
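As the template message above notes, the new sso71-https template mounts three secret-backed volumes (HTTPS_SECRET, JGROUPS_ENCRYPT_SECRET and SSO_TRUSTSTORE_SECRET) that must exist before the DeploymentConfig can roll out. A hedged sketch for listing those references from rendered template objects (for example, the output of the process_template sketch shown earlier):

    def required_secrets(objects):
        # Walk the rendered objects and collect every secretName mounted by a pod
        # template, so the secrets can be created up front.
        secrets = set()
        for obj in objects:
            pod_spec = obj.get('spec', {}).get('template', {}).get('spec', {})
            for volume in pod_spec.get('volumes', []):
                if 'secret' in volume:
                    secrets.add(volume['secret']['secretName'])
        return secrets

    # With the template defaults this yields {'sso-app-secret'}, since the server
    # keystore, JGroups keystore and SSO truststore all default to the same secret.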
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql-persistent.json
new file mode 100644
index 000000000..49b37f348
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql-persistent.json
@@ -0,0 +1,799 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 MySQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + MySQL (Persistent)"
+ },
+ "name": "sso71-mysql-persistent"
+ },
+ "labels": {
+ "template": "sso71-mysql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql.json
new file mode 100644
index 000000000..634a75bab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-mysql.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 MySQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + MySQL (Ephemeral)"
+ },
+ "name": "sso71-mysql"
+ },
+ "labels": {
+ "template": "sso71-mysql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service (using MySQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql-persistent.json
new file mode 100644
index 000000000..c53bb9d5b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql-persistent.json
@@ -0,0 +1,773 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 PostgreSQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + PostgreSQL (Persistent)"
+ },
+ "name": "sso71-postgresql-persistent"
+ },
+ "labels": {
+ "template": "sso71-postgresql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql.json
new file mode 100644
index 000000000..c1fc41eda
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso71-postgresql.json
@@ -0,0 +1,741 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.1 PostgreSQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,jboss,xpaas",
+ "version": "1.0.0",
+ "openshift.io/display-name": "Red Hat Single Sign-On 7.1 + PostgreSQL (Ephemeral)"
+ },
+ "name": "sso71-postgresql"
+ },
+ "labels": {
+ "template": "sso71-postgresql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications; \"${SSO_TRUSTSTORE_SECRET}\" containing the ${SSO_TRUSTSTORE} file used for securing SSO requests.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Username",
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Admin Password",
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Username",
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Service Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso71-openshift:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 551e21e72..1a4562776 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -53,7 +53,7 @@
# RHEL and Centos image streams are mutually exclusive
- name: Import RHEL streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ item }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
when: openshift_examples_load_rhel | bool
with_items:
- "{{ rhel_image_streams }}"
@@ -63,7 +63,7 @@
- name: Import Centos Image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ centos_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ centos_image_streams }}
when: openshift_examples_load_centos | bool
register: oex_import_centos_streams
failed_when: "'already exists' not in oex_import_centos_streams.stderr and oex_import_centos_streams.rc != 0"
@@ -71,7 +71,7 @@
- name: Import db templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ db_templates_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
when: openshift_examples_load_db_templates | bool
register: oex_import_db_templates
failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -88,7 +88,7 @@
- "{{ quickstarts_base }}/django.json"
- name: Remove defunct quickstart templates from openshift namespace
- command: "{{ openshift.common.client_binary }} -n openshift delete templates/{{ item }}"
+ command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- nodejs-example
- cakephp-example
@@ -100,7 +100,7 @@
- name: Import quickstart-templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
when: openshift_examples_load_quickstarts | bool
register: oex_import_quickstarts
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -114,7 +114,7 @@
- "{{ xpaas_templates_base }}/sso70-basic.json"
- name: Remove old xPaas templates from openshift namespace
- command: "{{ openshift.common.client_binary }} -n openshift delete templates/{{ item }}"
+ command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- sso70-basic
register: oex_delete_old_xpaas_templates
@@ -123,7 +123,7 @@
- name: Import xPaas image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_streams
failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -131,7 +131,7 @@
- name: Import xPaas templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_templates_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_templates
failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index e048bd107..80cb88d45 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -1,47 +1,69 @@
OpenShift Excluder
-================
+==================
Manages the excluder packages which add yum and dnf exclusions ensuring that
-the packages we care about are not inadvertantly updated. See
+the packages we care about are not inadvertently updated. See
https://github.com/openshift/origin/tree/master/contrib/excluder
Requirements
------------
-openshift_facts
+None
-Facts
------
+Inventory Variables
+-------------------
-| Name | Default Value | Description |
------------------------------|---------------|----------------------------------------|
-| enable_docker_excluder | enable_excluders | Enable docker excluder. If not set, the docker excluder is ignored. |
-| enable_openshift_excluder | enable_excluders | Enable openshift excluder. If not set, the openshift excluder is ignored. |
-| enable_excluders | None | Enable all excluders
+| Name | Default Value | Description |
+---------------------------------------|----------------------------|----------------------------------------|
+| openshift_enable_excluders | True | Enable all excluders |
+| openshift_enable_docker_excluder | openshift_enable_excluders | Enable docker excluder. If not set, the docker excluder is ignored. |
+| openshift_enable_openshift_excluder | openshift_enable_excluders | Enable openshift excluder. If not set, the openshift excluder is ignored. |
Role Variables
--------------
-None
+
+| Name | Default | Choices | Description |
+|-------------------------------------------|---------|-----------------|---------------------------------------------------------------------------|
+| r_openshift_excluder_action | enable | enable, disable | Action to perform when calling this role |
+| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
+| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
+| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
------------
-Tasks to include
-----------------
-
-- exclude: enable excluders (assuming excluders are installed)
-- unexclude: disable excluders (assuming excluders are installed)
-- install: install excluders (installation is followed by excluder enabling)
-- enable: enable excluders (optionally with installation step)
-- disabled: disable excluders (optionally with installation and status step, the status check that can override which excluder gets enabled/disabled)
-- status: determine status of excluders
+- lib_utils
Example Playbook
----------------
+```yaml
+- name: Demonstrate OpenShift Excluder usage
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ # Disable all excluders
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ # Enable all excluders
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ # Disable all excluders and verify appropriate excluder packages are available for upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
+```
TODO
----
+
It should be possible to manage the two excluders independently though that's not a hard requirement. However it should be done to manage docker on RHEL Containerized hosts.
License
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index 7c3ae2a86..d4f151142 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -1,6 +1,19 @@
---
# keep the 'current' package or update to 'latest' if available?
-openshift_excluder_package_state: present
-docker_excluder_package_state: present
+r_openshift_excluder_package_state: present
+r_openshift_excluder_docker_package_state: present
-enable_excluders: true
+# Legacy variables are included for backwards compatibility with v3.5
+# Inventory variables Legacy
+# openshift_enable_excluders enable_excluders
+# openshift_enable_openshift_excluder enable_openshift_excluder
+# openshift_enable_docker_excluder enable_docker_excluder
+r_openshift_excluder_enable_excluders: "{{ openshift_enable_excluders | default(enable_excluders) | default(true) }}"
+r_openshift_excluder_enable_openshift_excluder: "{{ openshift_enable_openshift_excluder | default(enable_openshift_excluder) | default(r_openshift_excluder_enable_excluders) }}"
+r_openshift_excluder_enable_docker_excluder: "{{ openshift_enable_docker_excluder | default(enable_docker_excluder) | default(r_openshift_excluder_enable_excluders) }}"
+
+# Default action when calling this role
+r_openshift_excluder_action: enable
+
+# When upgrading, this variable should be set to true when calling the role
+r_openshift_excluder_verify_upgrade: false
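The legacy-to-new variable mapping above chains Jinja `default()` filters so that the new `openshift_enable_*` inventory names take precedence, the old `enable_*` names are honored next, and a hard-coded default applies last. The following is a minimal Python sketch of that precedence chain; the function and sample inventory are illustrative only, not part of the role.

```python
# Sketch of the default() chaining in defaults/main.yml: new variable name,
# then legacy name, then the role default. Names here are illustrative.
def resolve_excluder_flag(inventory, new_name, legacy_name, fallback):
    """Return the first defined value among new name, legacy name, fallback."""
    if new_name in inventory:
        return bool(inventory[new_name])
    if legacy_name in inventory:
        return bool(inventory[legacy_name])
    return fallback

inventory = {"enable_docker_excluder": False}  # only the legacy variable is set
print(resolve_excluder_flag(inventory,
                            "openshift_enable_docker_excluder",
                            "enable_docker_excluder",
                            True))  # -> False, the legacy value is honored
```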
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 4d1c1efca..871081c19 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: Scott Dodson
- description: OpenShift Examples
+ description: OpenShift Excluder
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.2
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- { role: openshift_facts }
-- { role: openshift_repos }
+- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index 325d2a4e8..5add25b45 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -1,39 +1,40 @@
---
-# input variables
-# - excluder_package_state
-# - docker_excluder_package_state
-- include: init.yml
+- when: r_openshift_excluder_verify_upgrade
+ block:
+ - name: Include verify_upgrade.yml when upgrading
+ include: verify_upgrade.yml
-# Install any excluder that is enabled
-- include: install.yml
+# unexclude the current openshift/origin-excluder if it is installed so it can be updated
+- name: Disable excluders before the upgrade to remove older excluding expressions
+ include: unexclude.yml
vars:
- # Both docker_excluder_on and openshift_excluder_on are set in openshift_excluder->init task
- install_docker_excluder: "{{ docker_excluder_on | bool }}"
- install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when: docker_excluder_on or openshift_excluder_on
+ # before the docker excluder can be updated, it needs to be disabled
+ # to remove older excluded packages that are no longer excluded
+ unexclude_docker_excluder: "{{ r_openshift_excluder_enable_docker_excluder }}"
+ unexclude_openshift_excluder: "{{ r_openshift_excluder_enable_openshift_excluder }}"
- # if the docker excluder is not enabled, we don't care about its status
- # it the docker excluder is enabled, we install it and in case its status is non-zero
- # it is enabled no matter what
+# Install any excluder that is enabled
+- name: Include install.yml
+ include: install.yml
# And finally adjust an excluder in order to update host components correctly. First
# exclude then unexclude
-- block:
- - include: exclude.yml
- vars:
- # Enable the docker excluder only if it is overrided
- # BZ #1430612: docker excluders should be enabled even during installation and upgrade
- exclude_docker_excluder: "{{ docker_excluder_on | bool }}"
- # excluder is to be disabled by default
- exclude_openshift_excluder: false
- # All excluders that are to be disabled are disabled
- - include: unexclude.yml
- vars:
- # If the docker override is not set, default to the generic behaviour
- # BZ #1430612: docker excluders should be enabled even during installation and upgrade
- unexclude_docker_excluder: false
- # disable openshift excluder is never overrided to be enabled
- # disable it if the docker excluder is enabled
- unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when:
- - not openshift.common.is_atomic | bool
+- name: Include exclude.yml
+ include: exclude.yml
+ vars:
+ # Enable the docker excluder only if it is overridden
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ exclude_docker_excluder: "{{ r_openshift_excluder_enable_docker_excluder }}"
+ # excluder is to be disabled by default
+ exclude_openshift_excluder: false
+
+# All excluders that are to be disabled are disabled
+- name: Include unexclude.yml
+ include: unexclude.yml
+ vars:
+ # If the docker override is not set, default to the generic behaviour
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ unexclude_docker_excluder: false
+ # disable openshift excluder is never overridden to be enabled
+ # disable it if the docker excluder is enabled
+ unexclude_openshift_excluder: "{{ r_openshift_excluder_enable_openshift_excluder }}"
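The rewritten disable flow is a fixed sequence: optionally verify that upgradable excluder packages exist, lift the current excludes so the excluder packages themselves can be updated, install them, then re-apply only the docker exclude (per BZ #1430612) while explicitly leaving the openshift exclude off. A compressed Python sketch of that ordering follows; the step names are stand-ins for the included task files, not real code in this role.

```python
# Illustrative ordering only; the actual work happens in the included task files.
def disable_flow(verify_upgrade, docker_on, openshift_on):
    steps = []
    if verify_upgrade:
        steps.append("verify_upgrade")
    # unexclude first so older excluding expressions do not block the update
    steps.append(("unexclude", {"docker": docker_on, "openshift": openshift_on}))
    steps.append("install")
    # the docker excluder stays enabled even during install/upgrade (BZ #1430612)
    steps.append(("exclude", {"docker": docker_on, "openshift": False}))
    steps.append(("unexclude", {"docker": False, "openshift": openshift_on}))
    return steps

print(disable_flow(verify_upgrade=True, docker_on=True, openshift_on=True))
```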
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
index e719325bc..fce44cfb5 100644
--- a/roles/openshift_excluder/tasks/enable.yml
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -1,18 +1,6 @@
---
-# input variables:
-- block:
- - include: init.yml
+- name: Install excluders
+ include: install.yml
- - include: install.yml
- vars:
- install_docker_excluder: "{{ docker_excluder_on | bool }}"
- install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when: docker_excluder_on or openshift_excluder_on | bool
-
- - include: exclude.yml
- vars:
- exclude_docker_excluder: "{{ docker_excluder_on | bool }}"
- exclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
-
- when:
- - not openshift.common.is_atomic | bool
+- name: Enable excluders
+ include: exclude.yml
diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml
index ca18d343f..1b4818df9 100644
--- a/roles/openshift_excluder/tasks/exclude.yml
+++ b/roles/openshift_excluder/tasks/exclude.yml
@@ -1,30 +1,22 @@
---
-# input variables:
-# - exclude_docker_excluder
-# - exclude_openshift_excluder
-- block:
+- name: Check for docker-excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-docker-excluder
+ register: docker_excluder_stat
- - name: Check for docker-excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-docker-excluder
- register: docker_excluder_stat
- - name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when:
- - exclude_docker_excluder | default(false) | bool
- - docker_excluder_stat.stat.exists
+- name: Enable docker excluder
+ command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder exclude"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+ - docker_excluder_stat.stat.exists
- - name: Check for openshift excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-excluder
- register: openshift_excluder_stat
- - name: Enable openshift excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- # if the openshift override is set, it means the openshift excluder is disabled no matter what
- # if the openshift override is not set, the excluder is set based on enable_openshift_excluder
- when:
- - exclude_openshift_excluder | default(false) | bool
- - openshift_excluder_stat.stat.exists
+- name: Check for openshift excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-excluder
+ register: openshift_excluder_stat
+- name: Enable openshift excluder
+ command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder exclude"
when:
- - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_enable_openshift_excluder | bool
+ - openshift_excluder_stat.stat.exists
diff --git a/roles/openshift_excluder/tasks/init.yml b/roles/openshift_excluder/tasks/init.yml
deleted file mode 100644
index 1ea18f363..000000000
--- a/roles/openshift_excluder/tasks/init.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Evalute if docker excluder is to be enabled
- set_fact:
- docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders) | bool }}"
-
-- debug: var=docker_excluder_on
-
-- name: Evalute if openshift excluder is to be enabled
- set_fact:
- openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders) | bool }}"
-
-- debug: var=openshift_excluder_on
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 3490a613e..3a866cedf 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,21 +1,24 @@
---
-# input Variables
-# - install_docker_excluder
-# - install_openshift_excluder
-- block:
+
+- when:
+ - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_install_ran is not defined
+
+ block:
- name: Install docker excluder
package:
- name: "{{ openshift.common.service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ docker_excluder_package_state }}"
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- - install_docker_excluder | default(true) | bool
+ - r_openshift_excluder_enable_docker_excluder | bool
- name: Install openshift excluder
package:
- name: "{{ openshift.common.service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ openshift_excluder_package_state }}"
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_package_state }}"
when:
- - install_openshift_excluder | default(true) | bool
- when:
- - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_enable_openshift_excluder | bool
+
+ - set_fact:
+ r_openshift_excluder_install_ran: True
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
new file mode 100644
index 000000000..db20b4012
--- /dev/null
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+- block:
+
+ - name: Debug r_openshift_excluder_enable_docker_excluder
+ debug:
+ var: r_openshift_excluder_enable_docker_excluder
+
+ - name: Debug r_openshift_excluder_enable_openshift_excluder
+ debug:
+ var: r_openshift_excluder_enable_openshift_excluder
+
+ - name: Fail if invalid openshift_excluder_action provided
+ fail:
+ msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
+ when: r_openshift_excluder_action not in ['enable', 'disable']
+
+ - name: Fail if r_openshift_excluder_service_type is not defined
+ fail:
+ msg: "r_openshift_excluder_service_type must be specified for this role"
+ when: r_openshift_excluder_service_type is not defined
+
+ - name: Fail if r_openshift_excluder_upgrade_target is not defined
+ fail:
+ msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
+ when:
+ - r_openshift_excluder_verify_upgrade | bool
+ - r_openshift_excluder_upgrade_target is not defined
+
+ - name: Include main action task file
+ include: "{{ r_openshift_excluder_action }}.yml"
+
+ when:
+ - not ostree_booted.stat.exists | bool
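The new entry point validates its inputs before dispatching to either enable.yml or disable.yml through a variable include, and it skips everything on Atomic Host. Below is a minimal Python sketch of the same guard-and-dispatch pattern; the function is hypothetical and only mirrors the control flow of the task file.

```python
def run_excluder_role(action, service_type, verify_upgrade=False, upgrade_target=None):
    """Mirror the validation in tasks/main.yml before dispatching to an action task file."""
    if action not in ("enable", "disable"):
        raise ValueError("openshift_excluder role can only be called with 'enable' or 'disable'")
    if service_type is None:
        raise ValueError("r_openshift_excluder_service_type must be specified for this role")
    if verify_upgrade and upgrade_target is None:
        raise ValueError("r_openshift_excluder_upgrade_target must be provided for upgrades")
    return "{}.yml".format(action)  # the task file that would be included

print(run_excluder_role("disable", "origin", verify_upgrade=True, upgrade_target="3.6"))
```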
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index 4df7f14b4..a68165bde 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -2,27 +2,25 @@
# input variables:
# - unexclude_docker_excluder
# - unexclude_openshift_excluder
-- block:
- - name: Check for docker-excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-docker-excluder
- register: docker_excluder_stat
- - name: disable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
- when:
- - unexclude_docker_excluder | default(false) | bool
- - docker_excluder_stat.stat.exists
+- name: Check for docker-excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-docker-excluder
+ register: docker_excluder_stat
- - name: Check for openshift excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-excluder
- register: openshift_excluder_stat
- - name: disable openshift excluder
- command: "{{ openshift.common.service_type }}-excluder unexclude"
- when:
- - unexclude_openshift_excluder | default(false) | bool
- - openshift_excluder_stat.stat.exists
+- name: disable docker excluder
+ command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder unexclude"
+ when:
+ - unexclude_docker_excluder | default(false) | bool
+ - docker_excluder_stat.stat.exists
+
+- name: Check for openshift excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-excluder
+ register: openshift_excluder_stat
+- name: disable openshift excluder
+ command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder unexclude"
when:
- - not openshift.common.is_atomic | bool
+ - unexclude_openshift_excluder | default(false) | bool
+ - openshift_excluder_stat.stat.exists
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
new file mode 100644
index 000000000..c35639c1b
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -0,0 +1,32 @@
+---
+# input variables:
+# - excluder
+- name: Get available excluder version
+ repoquery:
+ name: "{{ excluder }}"
+ ignore_excluders: true
+ register: repoquery_out
+
+- name: Fail when excluder package is not found
+ fail:
+ msg: "Package {{ excluder }} not found"
+ when: not repoquery_out.results.package_found
+
+- name: Set fact excluder_version
+ set_fact:
+ excluder_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
+
+- name: "{{ excluder }} version detected"
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version }}"
+
+- name: Printing upgrade target version
+ debug:
+ msg: "{{ r_openshift_excluder_upgrade_target }}"
+
+- name: Check that the available {{ excluder }} version is at most the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version }} is higher than the upgrade target version"
+ when:
+ - excluder_version != ''
+ - excluder_version.split('.')[0:2] | join('.') | version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
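The final task compares only the major.minor portion of the available excluder version against the upgrade target. A rough Python equivalent of that comparison is shown below, assuming plain numeric dotted versions; the playbook itself uses Ansible's version_compare filter.

```python
def excluder_too_new(available, upgrade_target):
    """Return True when the available excluder's major.minor exceeds the target's."""
    def major_minor(version):
        return tuple(int(part) for part in version.split(".")[:2])
    return major_minor(available) > major_minor(upgrade_target)

print(excluder_too_new("3.7.0", "3.6"))    # True  -> the check would fail
print(excluder_too_new("3.6.173", "3.6"))  # False -> at most the target, OK
```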
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
new file mode 100644
index 000000000..42026664a
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -0,0 +1,12 @@
+---
+- name: Verify Docker Excluder version
+ include: verify_excluder.yml
+ vars:
+ excluder: "{{ r_openshift_excluder_service_type }}-docker-excluder"
+ when: r_openshift_excluder_enable_docker_excluder | bool
+
+- name: Verify OpenShift Excluder version
+ include: verify_excluder.yml
+ vars:
+ excluder: "{{ r_openshift_excluder_service_type }}-excluder"
+ when: r_openshift_excluder_enable_openshift_excluder | bool
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 00603f4fa..4cb5418c6 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -6,7 +6,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: "has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+ failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift.common.is_containerized | bool
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 28b388560..cc4dc9365 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -1,2 +1,2 @@
---
-use_system_containers: false
+openshift_use_system_containers: false
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index e1f4c4e6d..49cc51b48 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
@@ -194,8 +193,7 @@ def hostname_valid(hostname):
"""
if (not hostname or
hostname.startswith('localhost') or
- hostname.endswith('localdomain') or
- hostname.endswith('novalocal')):
+ hostname.endswith('localdomain')):
return False
return True
@@ -539,6 +537,7 @@ def set_node_schedulability(facts):
return facts
+# pylint: disable=too-many-branches
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -572,6 +571,10 @@ def set_selectors(facts):
facts['hosted']['logging'] = {}
if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
facts['hosted']['logging']['selector'] = None
+ if 'etcd' not in facts['hosted']:
+ facts['hosted']['etcd'] = {}
+ if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
+ facts['hosted']['etcd']['selector'] = None
return facts
@@ -909,34 +912,37 @@ def set_version_facts_if_unset(facts):
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
- version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
+ version_gte_3_6 = version >= LooseVersion('3.6')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
- version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
+ version_gte_3_6 = version >= LooseVersion('3.6')
else:
+ # 'Latest' version is set to True, 'Next' versions set to False
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
version_gte_3_4_or_1_4 = True
version_gte_3_5_or_1_5 = True
- version_gte_3_6_or_1_6 = False
+ version_gte_3_6 = True
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
- facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
+ facts['common']['version_gte_3_6'] = version_gte_3_6
- if version_gte_3_5_or_1_5:
+ if version_gte_3_6:
+ examples_content_version = 'v3.6'
+ elif version_gte_3_5_or_1_5:
examples_content_version = 'v1.5'
elif version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
@@ -1039,10 +1045,13 @@ def set_sdn_facts_if_unset(facts, system_facts):
def set_nodename(facts):
""" set nodename """
if 'node' in facts and 'common' in facts:
- if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
- facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
- elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
+ if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
+
+ # TODO: The openstack cloudprovider nodename setting was too opinionated.
+ # It needs to be generalized before it can be enabled again.
+ # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
+ # facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
else:
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
@@ -1300,7 +1309,7 @@ def get_version_output(binary, version_cmd):
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
- if is_service_running('docker'):
+ if is_service_running('docker') or is_service_running('container-engine'):
version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
if 'Server' in version_info:
result = {
@@ -1609,14 +1618,7 @@ def sort_unique(alist):
Returns:
list: a sorted de-duped list
"""
-
- alist.sort()
- out = list()
- for i in alist:
- if i not in out:
- out.append(i)
-
- return out
+ return sorted(list(set(alist)))
def safe_get_bool(fact):
@@ -1640,19 +1642,28 @@ def set_proxy_facts(facts):
"""
if 'common' in facts:
common = facts['common']
- if 'http_proxy' in common or 'https_proxy' in common:
+ if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
+
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
+ # masters behind a proxy need to connect to etcd via IP
+ if 'no_proxy_etcd_host_ips' in common:
+ if isinstance(common['no_proxy_etcd_host_ips'], string_types):
+ common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))
+
if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
common['no_proxy'].append('.' + common['dns_domain'])
+ common['no_proxy'].append('.svc')
common['no_proxy'].append(common['hostname'])
common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
facts['common'] = common
+
return facts
@@ -1789,6 +1800,12 @@ def set_container_facts_if_unset(facts):
deployer_image = 'openshift/origin-deployer'
facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
+ # If openshift_docker_use_system_container is set and is True ....
+ if 'use_system_container' in list(facts['docker'].keys()):
+ if facts['docker']['use_system_container']:
+ # ... set the service name to container-engine
+ facts['docker']['service_name'] = 'container-engine'
+
if 'is_containerized' not in facts['common']:
facts['common']['is_containerized'] = facts['common']['is_atomic']
if 'cli_image' not in facts['common']:
@@ -1908,14 +1925,16 @@ class OpenShiftFacts(object):
)
self.role = role
+ # Collect system facts and preface each fact with 'ansible_'.
try:
- # ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
+ additional_facts = {}
for (k, v) in self.system_facts.items():
- self.system_facts["ansible_%s" % k.replace('-', '_')] = v
+ additional_facts["ansible_%s" % k.replace('-', '_')] = v
+ self.system_facts.update(additional_facts)
except UnboundLocalError:
- # ansible-2.2
+ # ansible-2.2,2.3
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
@@ -2069,6 +2088,7 @@ class OpenShiftFacts(object):
hosted_registry_insecure = get_hosted_registry_insecure()
if hosted_registry_insecure is not None:
docker['hosted_registry_insecure'] = hosted_registry_insecure
+ docker['service_name'] = 'docker'
defaults['docker'] = docker
if 'clock' in roles:
@@ -2143,6 +2163,25 @@ class OpenShiftFacts(object):
create_pvc=False
)
),
+ etcd=dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='etcd',
+ size='1Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
@@ -2153,6 +2192,12 @@ class OpenShiftFacts(object):
nfs=dict(
directory='/exports',
options='*(rw,root_squash)'),
+ glusterfs=dict(
+ endpoints='glusterfs-registry-endpoints',
+ path='glusterfs-registry-volume',
+ readOnly=False,
+ swap=False,
+ swapcopy=True),
host=None,
access=dict(
modes=['ReadWriteMany']
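Among the openshift_facts.py changes, set_proxy_facts now also folds the etcd host IPs (BZ 1466783), the local dns domain, the `.svc` suffix, and the host's own name into no_proxy, then deduplicates and sorts the list before joining it back into a comma-separated string (the same effect as the simplified sort_unique). A small Python sketch of that assembly, using made-up sample values rather than real facts:

```python
def build_no_proxy(existing, etcd_host_ips, dns_domain, hostname):
    """Sketch of the no_proxy assembly in set_proxy_facts (sample inputs only)."""
    no_proxy = list(existing)
    no_proxy.extend(etcd_host_ips)          # masters behind a proxy reach etcd via IP
    no_proxy.append("." + dns_domain)       # local dns domain
    no_proxy.append(".svc")                 # cluster service suffix
    no_proxy.append(hostname)               # ourselves
    return ",".join(sorted(set(no_proxy)))  # dedupe and sort, like sort_unique()

print(build_no_proxy(["registry.example.com"], ["192.0.2.10"],
                     "cluster.local", "master1.example.com"))
```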
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index f657d86cf..451386bf1 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -9,24 +9,33 @@
l_is_atomic: "{{ ostree_booted.stat.exists }}"
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
- l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
- l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
- l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers) | bool) }}"
+ l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers) | bool) }}"
+ l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers) | bool) }}"
+ l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers) | bool) }}"
- set_fact:
l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+- set_fact:
+ l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
- name: Validate python version
fail:
msg: |
openshift-ansible requires Python 3 for {{ ansible_distribution }};
For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when: ansible_distribution == 'Fedora' and ansible_python['version']['major'] != 3
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+ - r_openshift_facts_ran is not defined
- name: Validate python version
fail:
msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+ - r_openshift_facts_ran is not defined
# Fail as early as possible if Atomic and old version of Docker
- block:
@@ -45,7 +54,9 @@
that:
- l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- when: l_is_atomic | bool
+ when:
+ - l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Load variables
include_vars: "{{ item }}"
@@ -56,7 +67,9 @@
- name: Ensure various deps are installed
package: name={{ item }} state=present
with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+ when:
+ - not l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Ensure various deps for running system containers are installed
package: name={{ item }} state=present
@@ -64,6 +77,7 @@
when:
- not l_is_atomic | bool
- l_any_system_container | bool
+ - r_openshift_facts_ran is not defined
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
@@ -80,6 +94,7 @@
is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ etcd_runtime: "{{ l_etcd_runtime }}"
system_images_registry: "{{ system_images_registry | default('') }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
@@ -95,3 +110,7 @@
- name: Set repoquery command
set_fact:
repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+- name: Register that this already ran
+ set_fact:
+ r_openshift_facts_ran: True
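The new l_etcd_runtime fact is a chained Jinja conditional: runc when etcd runs as a system container, docker for other containerized installs, host otherwise. A one-function Python sketch of the same selection, for illustration only:

```python
def etcd_runtime(is_etcd_system_container, is_containerized):
    """Mirror the chained conditional used to set l_etcd_runtime."""
    if is_etcd_system_container:
        return "runc"
    if is_containerized:
        return "docker"
    return "host"

assert etcd_runtime(True, True) == "runc"
assert etcd_runtime(False, True) == "docker"
assert etcd_runtime(False, False) == "host"
```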
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index cf0fe19f1..581dd7d15 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -4,6 +4,7 @@ Ansible action plugin to execute health checks in OpenShift clusters.
# pylint: disable=wrong-import-position,missing-docstring,invalid-name
import sys
import os
+from collections import defaultdict
try:
from __main__ import display
@@ -24,9 +25,11 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
+ task_vars = task_vars or {}
- if task_vars is None:
- task_vars = {}
+ # task vars are not available to the callback plugin in a supported way,
+ # so record any values it will need in the result.
+ result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
if "openshift" not in task_vars:
result["failed"] = True
@@ -34,39 +37,37 @@ class ActionModule(ActionBase):
return result
try:
- known_checks = self.load_known_checks()
+ known_checks = self.load_known_checks(tmp, task_vars)
+ args = self._task.args
+ resolved_checks = resolve_checks(args.get("checks", []), known_checks.values())
except OpenShiftCheckException as e:
result["failed"] = True
result["msg"] = str(e)
return result
- args = self._task.args
- requested_checks = resolve_checks(args.get("checks", []), known_checks.values())
-
- unknown_checks = requested_checks - set(known_checks)
- if unknown_checks:
- result["failed"] = True
- result["msg"] = (
- "One or more checks are unknown: {}. "
- "Make sure there is no typo in the playbook and no files are missing."
- ).format(", ".join(unknown_checks))
- return result
-
result["checks"] = check_results = {}
- for check_name in requested_checks & set(known_checks):
+ user_disabled_checks = [
+ check.strip()
+ for check in task_vars.get("openshift_disable_check", "").split(",")
+ ]
+
+ for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
- if check.is_active(task_vars):
+ if not check.is_active():
+ r = dict(skipped=True, skipped_reason="Not active for this host")
+ elif check_name in user_disabled_checks:
+ r = dict(skipped=True, skipped_reason="Disabled by user request")
+ else:
try:
- r = check.run(tmp, task_vars)
+ r = check.run()
except OpenShiftCheckException as e:
- r = {}
- r["failed"] = True
- r["msg"] = str(e)
- else:
- r = {"skipped": True}
+ r = dict(
+ failed=True,
+ msg=str(e),
+ )
check_results[check_name] = r
@@ -77,14 +78,11 @@ class ActionModule(ActionBase):
result["changed"] = any(r.get("changed", False) for r in check_results.values())
return result
- def load_known_checks(self):
+ def load_known_checks(self, tmp, task_vars):
load_checks()
known_checks = {}
-
- known_check_classes = set(cls for cls in OpenShiftCheck.subclasses())
-
- for cls in known_check_classes:
+ for cls in OpenShiftCheck.subclasses():
check_name = cls.name
if check_name in known_checks:
other_cls = known_checks[check_name].__class__
@@ -93,27 +91,46 @@ class ActionModule(ActionBase):
check_name,
cls.__module__, cls.__name__,
other_cls.__module__, other_cls.__name__))
- known_checks[check_name] = cls(execute_module=self._execute_module)
-
+ known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
return known_checks
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
- Resolving a check name involves expanding tag references (e.g., '@tag') with
- all the checks that contain the given tag.
+ Resolving a check name expands tag references (e.g., "@tag") to all the
+ checks that contain the given tag. OpenShiftCheckException is raised if
+ names contains an unknown check or tag name.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
- resolved = set()
- for name in names:
- if name.startswith("@"):
- for check in all_checks:
- if name[1:] in check.tags:
- resolved.add(check.name)
- else:
- resolved.add(name)
+ known_check_names = set(check.name for check in all_checks)
+ known_tag_names = set(name for check in all_checks for name in check.tags)
+
+ check_names = set(name for name in names if not name.startswith('@'))
+ tag_names = set(name[1:] for name in names if name.startswith('@'))
+
+ unknown_check_names = check_names - known_check_names
+ unknown_tag_names = tag_names - known_tag_names
+
+ if unknown_check_names or unknown_tag_names:
+ msg = []
+ if unknown_check_names:
+ msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names))))
+ if unknown_tag_names:
+ msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
+ msg.append('Make sure there is no typo in the playbook and no files are missing.')
+ raise OpenShiftCheckException('\n'.join(msg))
+
+ tag_to_checks = defaultdict(set)
+ for check in all_checks:
+ for tag in check.tags:
+ tag_to_checks[tag].add(check.name)
+
+ resolved = check_names.copy()
+ for tag in tag_names:
+ resolved.update(tag_to_checks[tag])
+
return resolved
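resolve_checks now validates all requested names up front and expands "@tag" references through a tag-to-checks map. The snippet below is a condensed, standalone Python sketch of that expansion; the check definitions are sample data, and the real function additionally raises OpenShiftCheckException for unknown names.

```python
from collections import defaultdict

def resolve(names, all_checks):
    """Expand '@tag' entries into check names; plain names pass through."""
    tag_to_checks = defaultdict(set)
    for check in all_checks:
        for tag in check["tags"]:
            tag_to_checks[tag].add(check["name"])
    resolved = {name for name in names if not name.startswith("@")}
    for tag in (name[1:] for name in names if name.startswith("@")):
        resolved.update(tag_to_checks[tag])
    return resolved

checks = [{"name": "memory_availability", "tags": ["preflight", "health"]},
          {"name": "package_version", "tags": ["preflight"]}]
print(resolve(["@health", "package_version"], checks))
```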
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 208e81048..d10200719 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -1,7 +1,12 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
-Ansible callback plugin.
-'''
+"""
+Ansible callback plugin to give a nicely formatted summary of failures.
+"""
+
+# Reason: In several locations below we disable pylint protected-access
+# for Ansible objects that do not give us any public way
+# to access the full details we need to report check failures.
+# Status: disabled permanently or until Ansible object has a public API.
+# This does leave the code more likely to be broken by future Ansible changes.
from pprint import pformat
@@ -11,48 +16,48 @@ from ansible.utils.color import stringc
class CallbackModule(CallbackBase):
- '''
+ """
This callback plugin stores task results and summarizes failures.
The file name is prefixed with `zz_` to make this plugin be loaded last by
Ansible, thus making its output the last thing that users see.
- '''
+ """
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'failure_summary'
CALLBACK_NEEDS_WHITELIST = False
+ _playbook_file = None
def __init__(self):
super(CallbackModule, self).__init__()
self.__failures = []
+ def v2_playbook_on_start(self, playbook):
+ super(CallbackModule, self).v2_playbook_on_start(playbook)
+ # re: playbook attrs see top comment # pylint: disable=protected-access
+ self._playbook_file = playbook._file_name
+
def v2_runner_on_failed(self, result, ignore_errors=False):
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
- self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
+ if not ignore_errors:
+ self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
- # TODO: update condition to consider a host var or env var to
- # enable/disable the summary, so that we can control the output from a
- # play.
if self.__failures:
- self._print_failure_summary()
+ self._print_failure_details(self.__failures)
- def _print_failure_summary(self):
- '''Print a summary of failed tasks (including ignored failures).'''
+ def _print_failure_details(self, failures):
+ """Print a summary of failed tasks or checks."""
self._display.display(u'\nFailure summary:\n')
- # TODO: group failures by host or by task. If grouped by host, it is
- # easy to see all problems of a given host. If grouped by task, it is
- # easy to see what hosts needs the same fix.
-
- width = len(str(len(self.__failures)))
+ width = len(str(len(failures)))
initial_indent_format = u' {{:>{width}}}. '.format(width=width)
initial_indent_len = len(initial_indent_format.format(0))
subsequent_indent = u' ' * initial_indent_len
subsequent_extra_indent = u' ' * (initial_indent_len + 10)
- for i, failure in enumerate(self.__failures, 1):
+ for i, failure in enumerate(failures, 1):
entries = _format_failure(failure)
self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
for entry in entries[1:]:
@@ -60,15 +65,60 @@ class CallbackModule(CallbackBase):
indented = u'{}{}'.format(subsequent_indent, entry)
self._display.display(indented)
-
-# Reason: disable pylint protected-access because we need to access _*
-# attributes of a task result to implement this method.
-# Status: permanently disabled unless Ansible's API changes.
-# pylint: disable=protected-access
+ failed_checks = set()
+ playbook_context = None
+ # re: result attrs see top comment # pylint: disable=protected-access
+ for failure in failures:
+ # Get context from check task result since callback plugins cannot access task vars.
+ # NOTE: thus context is not known unless checks run. Failures prior to checks running
+ # don't have playbook_context in the results. But we only use it now when checks fail.
+ playbook_context = playbook_context or failure['result']._result.get('playbook_context')
+ failed_checks.update(
+ name
+ for name, result in failure['result']._result.get('checks', {}).items()
+ if result.get('failed')
+ )
+ if failed_checks:
+ self._print_check_failure_summary(failed_checks, playbook_context)
+
+ def _print_check_failure_summary(self, failed_checks, context):
+ checks = ','.join(sorted(failed_checks))
+ # The purpose of specifying context is to vary the output depending on what the user was
+ # expecting to happen (based on which playbook they ran). The only use currently is to
+ # vary the message depending on whether the user was deliberately running checks or was
+ # trying to install/upgrade and checks are just included. Other use cases may arise.
+ summary = ( # default to explaining what checks are in the first place
+ '\n'
+ 'The execution of "{playbook}"\n'
+ 'includes checks designed to fail early if the requirements\n'
+ 'of the playbook are not met. One or more of these checks\n'
+ 'failed. To disregard these results, you may choose to\n'
+ 'disable failing checks by setting an Ansible variable:\n\n'
+ ' openshift_disable_check={checks}\n\n'
+ 'Failing check names are shown in the failure details above.\n'
+ 'Some checks may be configurable by variables if your requirements\n'
+ 'are different from the defaults; consult check documentation.\n'
+ 'Variables can be set in the inventory or passed on the\n'
+ 'command line using the -e flag to ansible-playbook.\n\n'
+ ).format(playbook=self._playbook_file, checks=checks)
+ if context in ['pre-install', 'health']:
+ summary = ( # user was expecting to run checks, less explanation needed
+ '\n'
+ 'You may choose to configure or disable failing checks by\n'
+ 'setting Ansible variables. To disable those above:\n\n'
+ ' openshift_disable_check={checks}\n\n'
+ 'Consult check documentation for configurable variables.\n'
+ 'Variables can be set in the inventory or passed on the\n'
+ 'command line using the -e flag to ansible-playbook.\n\n'
+ ).format(checks=checks)
+ self._display.display(summary)
+
+
+# re: result attrs see top comment # pylint: disable=protected-access
def _format_failure(failure):
- '''Return a list of pretty-formatted text entries describing a failure, including
+ """Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
- by a newline separator when output to the user.'''
+ by a newline separator when output to the user."""
result = failure['result']
host = result._host.get_name()
play = _get_play(result._task)
@@ -89,7 +139,7 @@ def _format_failure(failure):
def _format_failed_checks(checks):
- '''Return pretty-formatted text describing checks that failed.'''
+ """Return pretty-formatted text describing checks that failed."""
failed_check_msgs = []
for check, body in checks.items():
if body.get('failed', False): # only show the failed checks
@@ -101,13 +151,10 @@ def _format_failed_checks(checks):
return stringc(pformat(checks), C.COLOR_ERROR)
-# Reason: disable pylint protected-access because we need to access _*
-# attributes of obj to implement this function.
-# This is inspired by ansible.playbook.base.Base.dump_me.
-# Status: permanently disabled unless Ansible's API changes.
-# pylint: disable=protected-access
+# This is inspired by ansible.playbook.base.Base.dump_me.
+# re: play/task/block attrs see top comment # pylint: disable=protected-access
def _get_play(obj):
- '''Given a task or block, recursively tries to find its parent play.'''
+ """Given a task or block, recursively try to find its parent play."""
if hasattr(obj, '_play'):
return obj._play
if getattr(obj, '_parent'):
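The callback collects the names of failed checks out of each failed task's results and folds them into a single suggested openshift_disable_check value for the summary text. A minimal Python sketch of that aggregation over hypothetical result dictionaries:

```python
def failed_check_names(results):
    """Collect names of failed checks from a list of per-task 'checks' result dicts."""
    failed = set()
    for result in results:
        for name, body in result.get("checks", {}).items():
            if body.get("failed"):
                failed.add(name)
    return ",".join(sorted(failed))

results = [{"checks": {"memory_availability": {"failed": True},
                       "docker_storage": {"failed": False}}},
           {"checks": {"package_version": {"failed": True}}}]
print("openshift_disable_check=" + failed_check_names(results))
```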
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 191a4b107..f9babebb9 100755..100644
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,91 +1,215 @@
#!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
-Ansible module for determining if multiple versions of an OpenShift package are
-available, and if the version requested is available down to the given
-precision.
+"""
+Ansible module for yum-based systems determining if multiple releases
+of an OpenShift package are available, and if the release requested
+(if any) is available down to the given precision.
+
+For Enterprise, multiple releases available suggest that multiple repos
+are enabled for the different releases, which may cause installation
+problems. With Origin, however, this is a normal state of affairs as
+all the releases are provided in a single repo with the expectation that
+only the latest can be installed.
+
+Code in the openshift_version role contains a lot of logic to pin down
+the exact package and image version to use and so does some validation
+of release availability already. Without duplicating all that, we would
+like the user to have a helpful error message if we detect things will
+not work out right. Note that if openshift_release is not specified in
+the inventory, the version comparison checks just pass.
+"""
-Multiple versions available suggest that multiple repos are enabled for the
-different versions, which may cause installation problems.
-'''
+from ansible.module_utils.basic import AnsibleModule
+# NOTE: because of the dependency on yum (Python 2-only), this module does not
+# work under Python 3. But since we run unit tests against both Python 2 and
+# Python 3, we use six for cross compatibility in this module alone:
+from ansible.module_utils.six import string_types
-import yum # pylint: disable=import-error
+IMPORT_EXCEPTION = None
+try:
+ import yum # pylint: disable=import-error
+except ImportError as err:
+ IMPORT_EXCEPTION = err
-from ansible.module_utils.basic import AnsibleModule
+
+class AosVersionException(Exception):
+ """Base exception class for package version problems"""
+ def __init__(self, message, problem_pkgs=None):
+ Exception.__init__(self, message)
+ self.problem_pkgs = problem_pkgs
-def main(): # pylint: disable=missing-docstring,too-many-branches
+def main():
+ """Entrypoint for this Ansible module"""
module = AnsibleModule(
argument_spec=dict(
- prefix=dict(required=True), # atomic-openshift, origin, ...
- version=dict(required=True),
+ package_list=dict(type="list", required=True),
),
supports_check_mode=True
)
- def bail(error): # pylint: disable=missing-docstring
- module.fail_json(msg=error)
+ if IMPORT_EXCEPTION:
+ module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION)
+
+ # determine the packages we will look for
+ package_list = module.params['package_list']
+ if not package_list:
+ module.fail_json(msg="package_list must not be empty")
+
+ # generate set with only the names of expected packages
+ expected_pkg_names = [p["name"] for p in package_list]
+
+ # gather packages that require a multi_minor_release check
+ multi_minor_pkgs = [p for p in package_list if p["check_multi"]]
+
+ # generate list of packages with a specified (non-empty) version
+ # should look like a version string with possibly many segments e.g. "3.4.1"
+ versioned_pkgs = [p for p in package_list if p["version"]]
+
+ # get the list of packages available and complain if anything is wrong
+ try:
+ pkgs = _retrieve_available_packages(expected_pkg_names)
+ if versioned_pkgs:
+ _check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
+ _check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
+ if multi_minor_pkgs:
+ _check_multi_minor_release(pkgs, _to_dict(multi_minor_pkgs))
+ except AosVersionException as excinfo:
+ module.fail_json(msg=str(excinfo))
+ module.exit_json(changed=False)
+
- rpm_prefix = module.params['prefix']
+def _to_dict(pkg_list):
+ return {pkg["name"]: pkg for pkg in pkg_list}
- if not rpm_prefix:
- bail("prefix must not be empty")
+def _retrieve_available_packages(expected_pkgs):
+ # search for package versions available for openshift pkgs
yb = yum.YumBase() # pylint: disable=invalid-name
- yb.conf.disable_excludes = ["all"] # assume the openshift excluder will be managed, ignore current state
-
- # search for package versions available for aos pkgs
- expected_pkgs = [
- rpm_prefix,
- rpm_prefix + '-master',
- rpm_prefix + '-node',
- ]
+
+ # The openshift excluder prevents unintended updates to openshift
+ # packages by setting yum excludes on those packages. See:
+ # https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
+ # Excludes are then disabled during an install or upgrade, but
+ # this check will most likely be running outside either. When we
+ # attempt to determine what packages are available via yum they may
+ # be excluded. So, for our purposes here, disable excludes to see
+ # what will really be available during an install or upgrade.
+ yb.conf.disable_excludes = ['all']
+
try:
pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
- except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
+ except yum.Errors.PackageSackError as excinfo:
# you only hit this if *none* of the packages are available
- bail('Unable to find any OpenShift packages.\nCheck your subscription and repo settings.\n%s' % e)
+ raise AosVersionException('\n'.join([
+ 'Unable to find any OpenShift packages.',
+ 'Check your subscription and repo settings.',
+ str(excinfo),
+ ]))
+ return pkgs
+
+
+class PreciseVersionNotFound(AosVersionException):
+ """Exception for reporting packages not available at given version"""
+ def __init__(self, not_found):
+ msg = ['Not all of the required packages are available at their requested version']
+ msg += ['{}:{} '.format(pkg["name"], pkg["version"]) for pkg in not_found]
+ msg += ['Please check your subscriptions and enabled repositories.']
+ AosVersionException.__init__(self, '\n'.join(msg), not_found)
+
+
+def _check_precise_version_found(pkgs, expected_pkgs_dict):
+ # see if any packages couldn't be found at requested release version
+ # we would like to verify that the latest available pkgs match the requested version at whatever precision it is given.
+ # e.g. if version 3.4.1 is requested, an available 3.4.1.5 passes the check; if only 3.4.0 is available, it fails.
+
+ pkgs_precise_version_found = set()
+ for pkg in pkgs:
+ if pkg.name not in expected_pkgs_dict:
+ continue
+ expected_pkg_versions = expected_pkgs_dict[pkg.name]["version"]
+ if isinstance(expected_pkg_versions, string_types):
+ expected_pkg_versions = [expected_pkg_versions]
+ for expected_pkg_version in expected_pkg_versions:
+ # does the version match, to the precision requested?
+ # and, is it strictly greater, at the precision requested?
+ match_version = '.'.join(pkg.version.split('.')[:expected_pkg_version.count('.') + 1])
+ if match_version == expected_pkg_version:
+ pkgs_precise_version_found.add(pkg.name)
+
+ not_found = []
+ for name, pkg in expected_pkgs_dict.items():
+ if name not in pkgs_precise_version_found:
+ not_found.append(pkg)
+
+ if not_found:
+ raise PreciseVersionNotFound(not_found)
+
+
+class FoundHigherVersion(AosVersionException):
+ """Exception for reporting that a higher version than requested is available"""
+ def __init__(self, higher_found):
+ msg = ['Some required package(s) are available at a version',
+ 'that is higher than requested']
+ msg += [' ' + name for name in higher_found]
+ msg += ['This will prevent installing the version you requested.']
+ msg += ['Please check your enabled repositories or adjust openshift_release.']
+ AosVersionException.__init__(self, '\n'.join(msg), higher_found)
+
- # determine what level of precision we're expecting for the version
- expected_version = module.params['version']
- if expected_version.startswith('v'): # v3.3 => 3.3
- expected_version = expected_version[1:]
- num_dots = expected_version.count('.')
+def _check_higher_version_found(pkgs, expected_pkgs_dict):
+ expected_pkg_names = list(expected_pkgs_dict)
+ # see if any packages are available in a version higher than requested
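+    # Illustrative example (not part of the original change): if the highest
+    # requested version is 3.6 and a 3.7.0 package is available, that package is
+    # flagged, since its presence can prevent installing the requested 3.6 release.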
+ higher_version_for_pkg = {}
+ for pkg in pkgs:
+ if pkg.name not in expected_pkg_names:
+ continue
+ expected_pkg_versions = expected_pkgs_dict[pkg.name]["version"]
+ if isinstance(expected_pkg_versions, string_types):
+ expected_pkg_versions = [expected_pkg_versions]
+ # NOTE: the list of versions is assumed to be sorted so that the highest
+ # desirable version is the last.
+ highest_desirable_version = expected_pkg_versions[-1]
+ req_release_arr = [int(segment) for segment in highest_desirable_version.split(".")]
+ version = [int(segment) for segment in pkg.version.split(".")]
+ too_high = version[:len(req_release_arr)] > req_release_arr
+ higher_than_seen = version > higher_version_for_pkg.get(pkg.name, [])
+ if too_high and higher_than_seen:
+ higher_version_for_pkg[pkg.name] = version
+
+ if higher_version_for_pkg:
+ higher_found = []
+ for name, version in higher_version_for_pkg.items():
+ higher_found.append(name + '-' + '.'.join(str(segment) for segment in version))
+ raise FoundHigherVersion(higher_found)
+
+
+class FoundMultiRelease(AosVersionException):
+ """Exception for reporting multiple minor releases found for same package"""
+ def __init__(self, multi_found):
+ msg = ['Multiple minor versions of these packages are available']
+ msg += [' ' + name for name in multi_found]
+ msg += ["There should only be one OpenShift release repository enabled at a time."]
+ AosVersionException.__init__(self, '\n'.join(msg), multi_found)
+
+
+def _check_multi_minor_release(pkgs, expected_pkgs_dict):
+ # see if any packages are available in more than one minor version
pkgs_by_name_version = {}
- pkgs_precise_version_found = {}
for pkg in pkgs:
- # get expected version precision
- match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
- if match_version == expected_version:
- pkgs_precise_version_found[pkg.name] = True
- # get x.y version precision
- minor_version = '.'.join(pkg.version.split('.')[:2])
+ # keep track of x.y (minor release) versions seen
+ minor_release = '.'.join(pkg.version.split('.')[:2])
if pkg.name not in pkgs_by_name_version:
- pkgs_by_name_version[pkg.name] = {}
- pkgs_by_name_version[pkg.name][minor_version] = True
+ pkgs_by_name_version[pkg.name] = set()
+ pkgs_by_name_version[pkg.name].add(minor_release)
- # see if any packages couldn't be found at requested version
- # see if any packages are available in more than one minor version
- not_found = []
multi_found = []
- for name in expected_pkgs:
- if name not in pkgs_precise_version_found:
- not_found.append(name)
+ for name in expected_pkgs_dict:
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
- if not_found:
- msg = 'Not all of the required packages are available at requested version %s:\n' % expected_version
- for name in not_found:
- msg += ' %s\n' % name
- bail(msg + 'Please check your subscriptions and enabled repositories.')
- if multi_found:
- msg = 'Multiple minor versions of these packages are available\n'
- for name in multi_found:
- msg += ' %s\n' % name
- bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
- module.exit_json(changed=False)
+ if multi_found:
+ raise FoundMultiRelease(multi_found)
if __name__ == '__main__':
diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py
index 630ebc848..433795b67 100755..100644
--- a/roles/openshift_health_checker/library/check_yum_update.py
+++ b/roles/openshift_health_checker/library/check_yum_update.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible module to test whether a yum update or install will succeed,
without actually performing it or running yum.
diff --git a/roles/openshift_health_checker/library/docker_info.py b/roles/openshift_health_checker/library/docker_info.py
index 7f712bcff..0d0ddae8b 100644
--- a/roles/openshift_health_checker/library/docker_info.py
+++ b/roles/openshift_health_checker/library/docker_info.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring
"""
Ansible module for determining information about the docker host.
@@ -13,6 +12,7 @@ from ansible.module_utils.docker_common import AnsibleDockerClient
def main():
+ """Entrypoint for running an Ansible module."""
client = AnsibleDockerClient()
client.module.exit_json(
diff --git a/roles/openshift_health_checker/library/etcdkeysize.py b/roles/openshift_health_checker/library/etcdkeysize.py
new file mode 100644
index 000000000..620e82d87
--- /dev/null
+++ b/roles/openshift_health_checker/library/etcdkeysize.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+"""Ansible module that recursively determines if the size of a key in an etcd cluster exceeds a given limit."""
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+try:
+ import etcd
+
+ IMPORT_EXCEPTION_MSG = None
+except ImportError as err:
+ IMPORT_EXCEPTION_MSG = str(err)
+
+ from collections import namedtuple
+ EtcdMock = namedtuple("etcd", ["EtcdKeyNotFound"])
+ etcd = EtcdMock(KeyError)
+
+
+# pylint: disable=too-many-arguments
+def check_etcd_key_size(client, key, size_limit, total_size=0, depth=0, depth_limit=1000, visited=None):
+    """Check size of an etcd path starting at given key. Returns tuple (int size, bool limit_exceeded)."""
+ if visited is None:
+ visited = set()
+
+ if key in visited:
+ return 0, False
+
+ visited.add(key)
+
+ try:
+ result = client.read(key, recursive=False)
+ except etcd.EtcdKeyNotFound:
+ return 0, False
+
+ size = 0
+ limit_exceeded = False
+
+ for node in result.leaves:
+ if depth >= depth_limit:
+ raise Exception("Maximum recursive stack depth ({}) exceeded.".format(depth_limit))
+
+ if size_limit and total_size + size > size_limit:
+ return size, True
+
+ if not node.dir:
+ size += len(node.value)
+ continue
+
+ key_size, limit_exceeded = check_etcd_key_size(client, node.key,
+ size_limit,
+ total_size + size,
+ depth + 1,
+ depth_limit, visited)
+ size += key_size
+
+ max_limit_exceeded = limit_exceeded or (total_size + size > size_limit)
+ return size, max_limit_exceeded
+
+
+def main(): # pylint: disable=missing-docstring,too-many-branches
+ module = AnsibleModule(
+ argument_spec=dict(
+ size_limit_bytes=dict(type="int", default=0),
+ paths=dict(type="list", default=["/openshift.io/images"]),
+ host=dict(type="str", default="127.0.0.1"),
+ port=dict(type="int", default=4001),
+ protocol=dict(type="str", default="http"),
+ version_prefix=dict(type="str", default=""),
+ allow_redirect=dict(type="bool", default=False),
+ cert=dict(type="dict", default=""),
+ ca_cert=dict(type="str", default=None),
+ ),
+ supports_check_mode=True
+ )
+
+ module.params["cert"] = (
+ module.params["cert"]["cert"],
+ module.params["cert"]["key"],
+ )
+
+ size_limit = module.params.pop("size_limit_bytes")
+ paths = module.params.pop("paths")
+
+ limit_exceeded = False
+
+ try:
+ # pylint: disable=no-member
+ client = etcd.Client(**module.params)
+ except AttributeError as attrerr:
+ msg = str(attrerr)
+ if IMPORT_EXCEPTION_MSG:
+ msg = IMPORT_EXCEPTION_MSG
+ if "No module named etcd" in IMPORT_EXCEPTION_MSG:
+ # pylint: disable=redefined-variable-type
+ msg = ('Unable to import the python "etcd" dependency. '
+ 'Make sure python-etcd is installed on the host.')
+
+ module.exit_json(
+ failed=True,
+ changed=False,
+ size_limit_exceeded=limit_exceeded,
+ msg=msg,
+ )
+
+ return
+
+ size = 0
+ for path in paths:
+ path_size, limit_exceeded = check_etcd_key_size(client, path, size_limit - size)
+ size += path_size
+
+ if limit_exceeded:
+ break
+
+ module.exit_json(
+ changed=False,
+ size_limit_exceeded=limit_exceeded,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
new file mode 100644
index 000000000..2e60735d6
--- /dev/null
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+"""Interface to OpenShift oc command"""
+
+import os
+import shlex
+import shutil
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ """Find and return oc binary file"""
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+def main():
+ """Module that executes commands on a remote OpenShift cluster"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ namespace=dict(type="str", required=True),
+ config_file=dict(type="str", required=True),
+ cmd=dict(type="str", required=True),
+ extra_args=dict(type="list", default=[]),
+ ),
+ )
+
+ cmd = [
+ locate_oc_binary(),
+ '--config', module.params["config_file"],
+ '-n', module.params["namespace"],
+ ] + shlex.split(module.params["cmd"])
+
+ failed = True
+ try:
+ cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
+ failed = False
+ except subprocess.CalledProcessError as exc:
+ cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
+ except OSError as exc:
+ # we get this when 'oc' is not there
+ cmd_result = str(exc)
+
+ module.exit_json(
+ changed=False,
+ failed=failed,
+ result=cmd_result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_health_checker/library/rpm_version.py b/roles/openshift_health_checker/library/rpm_version.py
new file mode 100644
index 000000000..8ea223055
--- /dev/null
+++ b/roles/openshift_health_checker/library/rpm_version.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+"""
+Ansible module for rpm-based systems that determines installed package version information on a host.
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+IMPORT_EXCEPTION = None
+try:
+ import rpm # pylint: disable=import-error
+except ImportError as err:
+ IMPORT_EXCEPTION = err # in tox test env, rpm import fails
+
+
+class RpmVersionException(Exception):
+ """Base exception class for package version problems"""
+ def __init__(self, message, problem_pkgs=None):
+ Exception.__init__(self, message)
+ self.problem_pkgs = problem_pkgs
+
+
+def main():
+ """Entrypoint for this Ansible module"""
+ module = AnsibleModule(
+ argument_spec=dict(
+ package_list=dict(type="list", required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ if IMPORT_EXCEPTION:
+ module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION)
+
+ # determine the packages we will look for
+ pkg_list = module.params['package_list']
+ if not pkg_list:
+ module.fail_json(msg="package_list must not be empty")
+
+ # get list of packages available and complain if any
+ # of them are missing or if any errors occur
+ try:
+ pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list))
+ _check_pkg_versions(pkg_versions, _to_dict(pkg_list))
+ except RpmVersionException as excinfo:
+ module.fail_json(msg=str(excinfo))
+ module.exit_json(changed=False)
+
+
+def _to_dict(pkg_list):
+ return {pkg["name"]: pkg for pkg in pkg_list}
+
+
+def _retrieve_expected_pkg_versions(expected_pkgs_dict):
+ """Search for installed packages matching given pkg names
+ and versions. Returns a dictionary: {pkg_name: [versions]}"""
+
+ transaction = rpm.TransactionSet()
+ pkgs = {}
+
+ for pkg_name in expected_pkgs_dict:
+ matched_pkgs = transaction.dbMatch("name", pkg_name)
+ if not matched_pkgs:
+ continue
+
+ for header in matched_pkgs:
+ if header['name'] == pkg_name:
+ if pkg_name not in pkgs:
+ pkgs[pkg_name] = []
+
+ pkgs[pkg_name].append(header['version'])
+
+ return pkgs
+
+
+def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict):
+ invalid_pkg_versions = {}
+ not_found_pkgs = []
+
+ for pkg_name, pkg in expected_pkgs_dict.items():
+ if not found_pkgs_dict.get(pkg_name):
+ not_found_pkgs.append(pkg_name)
+ continue
+
+ found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]]
+ expected_version = _parse_version(pkg["version"])
+ if expected_version not in found_versions:
+ invalid_pkg_versions[pkg_name] = {
+ "found_versions": found_versions,
+ "required_version": expected_version,
+ }
+
+ if not_found_pkgs:
+ raise RpmVersionException(
+ '\n'.join([
+ "The following packages were not found to be installed: {}".format('\n '.join([
+ "{}".format(pkg)
+ for pkg in not_found_pkgs
+ ]))
+ ]),
+ not_found_pkgs,
+ )
+
+ if invalid_pkg_versions:
+ raise RpmVersionException(
+ '\n '.join([
+ "The following packages were found to be installed with an incorrect version: {}".format('\n'.join([
+ " \n{}\n Required version: {}\n Found versions: {}".format(
+ pkg_name,
+ pkg["required_version"],
+ ', '.join([version for version in pkg["found_versions"]]))
+ for pkg_name, pkg in invalid_pkg_versions.items()
+ ]))
+ ]),
+ invalid_pkg_versions,
+ )
+
+
+def _parse_version(version_str):
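+    # Reduce a version string to its x.y form for comparison (illustrative note,
+    # not part of the original change): "3.6.173.0.5" becomes "3.6", while short
+    # versions such as "3.6" are returned unchanged.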
+ segs = version_str.split('.')
+ if not segs or len(segs) <= 2:
+ return version_str
+
+ return '.'.join(segs[0:2])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_health_checker/library/search_journalctl.py b/roles/openshift_health_checker/library/search_journalctl.py
new file mode 100644
index 000000000..3631f71c8
--- /dev/null
+++ b/roles/openshift_health_checker/library/search_journalctl.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+"""Interface to journalctl."""
+
+from time import time
+import json
+import re
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class InvalidMatcherRegexp(Exception):
+ """Exception class for invalid matcher regexp."""
+ pass
+
+
+class InvalidLogEntry(Exception):
+ """Exception class for invalid / non-json log entries."""
+ pass
+
+
+class LogInputSubprocessError(Exception):
+ """Exception class for errors that occur while executing a subprocess."""
+ pass
+
+
+def main():
+ """Scan a given list of "log_matchers" for journalctl messages containing given patterns.
+ "log_matchers" is a list of dicts consisting of three keys that help fine-tune log searching:
+ 'start_regexp', 'regexp', and 'unit'.
+
+ Sample "log_matchers" list:
+
+ [
+ {
+ 'start_regexp': r'Beginning of systemd unit',
+ 'regexp': r'the specific log message to find',
+ 'unit': 'etcd',
+ }
+ ]
+ """
+ module = AnsibleModule(
+ argument_spec=dict(
+ log_count_limit=dict(type="int", default=500),
+ log_matchers=dict(type="list", required=True),
+ ),
+ )
+
+ timestamp_limit_seconds = time() - 60 * 60 # 1 hour
+
+ log_count_limit = module.params["log_count_limit"]
+ log_matchers = module.params["log_matchers"]
+
+ matched_regexp, errors = get_log_matches(log_matchers, log_count_limit, timestamp_limit_seconds)
+
+ module.exit_json(
+ changed=False,
+ failed=bool(errors),
+ errors=errors,
+ matched=matched_regexp,
+ )
+
+
+def get_log_matches(matchers, log_count_limit, timestamp_limit_seconds):
+ """Return a list of up to log_count_limit matches for each matcher.
+
+ Log entries are only considered if newer than timestamp_limit_seconds.
+ """
+ matched_regexp = []
+ errors = []
+
+ for matcher in matchers:
+ try:
+ log_output = get_log_output(matcher)
+ except LogInputSubprocessError as err:
+ errors.append(str(err))
+ continue
+
+ try:
+ matched = find_matches(log_output, matcher, log_count_limit, timestamp_limit_seconds)
+ if matched:
+ matched_regexp.append(matcher.get("regexp", ""))
+ except InvalidMatcherRegexp as err:
+ errors.append(str(err))
+ except InvalidLogEntry as err:
+ errors.append(str(err))
+
+ return matched_regexp, errors
+
+
+def get_log_output(matcher):
+ """Return an iterator on the logs of a given matcher."""
+ try:
+ cmd_output = subprocess.Popen(list([
+ '/bin/journalctl',
+ '-ru', matcher.get("unit", ""),
+ '--output', 'json',
+ ]), stdout=subprocess.PIPE)
+
+ return iter(cmd_output.stdout.readline, '')
+
+ except subprocess.CalledProcessError as exc:
+ msg = "Could not obtain journalctl logs for the specified systemd unit: {}: {}"
+ raise LogInputSubprocessError(msg.format(matcher.get("unit", "<missing>"), str(exc)))
+ except OSError as exc:
+ raise LogInputSubprocessError(str(exc))
+
+
+def find_matches(log_output, matcher, log_count_limit, timestamp_limit_seconds):
+ """Return log messages matched in iterable log_output by a given matcher.
+
+ Ignore any log_output items older than timestamp_limit_seconds.
+ """
+ try:
+ regexp = re.compile(matcher.get("regexp", ""))
+ start_regexp = re.compile(matcher.get("start_regexp", ""))
+ except re.error as err:
+ msg = "A log matcher object was provided with an invalid regular expression: {}"
+ raise InvalidMatcherRegexp(msg.format(str(err)))
+
+ matched = None
+
+ for log_count, line in enumerate(log_output):
+ if log_count >= log_count_limit:
+ break
+
+ try:
+ obj = json.loads(line)
+
+ # don't need to look past the most recent service restart
+ if start_regexp.match(obj["MESSAGE"]):
+ break
+
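+            # Note (assumption about journald, not from the original change):
+            # __REALTIME_TIMESTAMP is expressed in microseconds since the epoch,
+            # so divide by 1,000,000 to compare against the limit in seconds.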
+ log_timestamp_seconds = float(obj["__REALTIME_TIMESTAMP"]) / 1000000
+ if log_timestamp_seconds < timestamp_limit_seconds:
+ break
+
+ if regexp.match(obj["MESSAGE"]):
+ matched = line
+ break
+
+ except ValueError:
+ msg = "Log entry for systemd unit {} contained invalid json syntax: {}"
+ raise InvalidLogEntry(msg.format(matcher.get("unit"), line))
+
+ return matched
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index 0bbeadd34..4d141974c 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -1,3 +1,5 @@
---
dependencies:
- role: openshift_facts
+ - role: openshift_repos
+ - role: openshift_version
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index be63d864a..40a28cde5 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -19,15 +19,21 @@ class OpenShiftCheckException(Exception):
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """A base class for defining checks for an OpenShift cluster environment."""
+ """
+ A base class for defining checks for an OpenShift cluster environment.
+
+ Expect optional params: method execute_module, dict task_vars, and string tmp.
+ execute_module is expected to have a signature compatible with _execute_module
+ from ansible plugins/action/__init__.py, e.g.:
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
+ This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
+ which provides the check's stored task_vars and tmp.
+ """
- def __init__(self, execute_module=None, module_executor=None):
- if execute_module is module_executor is None:
- raise TypeError(
- "__init__() takes either execute_module (recommended) "
- "or module_executor (deprecated), none given")
- self.execute_module = execute_module or module_executor
- self.module_executor = self.execute_module
+ def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ self._execute_module = execute_module
+ self.task_vars = task_vars or {}
+ self.tmp = tmp
@abstractproperty
def name(self):
@@ -43,13 +49,13 @@ class OpenShiftCheck(object):
"""
return []
- @classmethod
- def is_active(cls, task_vars): # pylint: disable=unused-argument
+ @staticmethod
+ def is_active():
"""Returns true if this check applies to the ansible-playbook run."""
return True
@abstractmethod
- def run(self, tmp, task_vars):
+ def run(self):
"""Executes a check, normally implemented as a module."""
return {}
@@ -62,34 +68,64 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
+ def execute_module(self, module_name=None, module_args=None):
+ """Invoke an Ansible module from a check.
+
+ Invoke stored _execute_module, normally copied from the action
+ plugin, with its params and the task_vars and tmp given at
+ check initialization. No positional parameters beyond these
+ are specified. If it's necessary to specify any of the other
+ parameters to _execute_module then that should just be invoked
+ directly (with awareness of changes in method signature per
+ Ansible version).
+
+ So e.g. check.execute_module("foo", dict(arg1=...))
+ Return: result hash from module execution.
+ """
+ if self._execute_module is None:
+ raise NotImplementedError(
+ self.__class__.__name__ +
+ " invoked execute_module without providing the method at initialization."
+ )
+ return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+
+ def get_var(self, *keys, **kwargs):
+ """Get deeply nested values from task_vars.
+
+ Ansible task_vars structures are Python dicts, often mapping strings to
+ other dicts. This helper makes it easier to get a nested value, raising
+ OpenShiftCheckException when a key is not found or returning a default value
+ provided as a keyword argument.
+ """
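+        # Usage sketch (illustrative): self.get_var("openshift", "common", "is_containerized")
+        # walks task_vars["openshift"]["common"]["is_containerized"], raising
+        # OpenShiftCheckException if a key is missing and no default is given.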
+ try:
+ value = reduce(operator.getitem, keys, self.task_vars)
+ except (KeyError, TypeError):
+ if "default" in kwargs:
+ return kwargs["default"]
+ raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
+ return value
+
LOADER_EXCLUDES = (
"__init__.py",
"mixins.py",
+ "logging.py",
)
-def load_checks():
+def load_checks(path=None, subpkg=""):
"""Dynamically import all check modules for the side effect of registering checks."""
- return [
- import_module(__package__ + "." + name[:-3])
- for name in os.listdir(os.path.dirname(__file__))
- if name.endswith(".py") and name not in LOADER_EXCLUDES
- ]
+ if path is None:
+ path = os.path.dirname(__file__)
+ modules = []
-def get_var(task_vars, *keys, **kwargs):
- """Helper function to get deeply nested values from task_vars.
+ for name in os.listdir(path):
+ if os.path.isdir(os.path.join(path, name)):
+ modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
+ continue
- Ansible task_vars structures are Python dicts, often mapping strings to
- other dicts. This helper makes it easier to get a nested value, raising
- OpenShiftCheckException when a key is not found or returning a default value
- provided as a keyword argument.
- """
- try:
- value = reduce(operator.getitem, keys, task_vars)
- except (KeyError, TypeError):
- if "default" in kwargs:
- return kwargs["default"]
- raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
- return value
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
+ modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
+
+ return modules
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
new file mode 100644
index 000000000..283461294
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -0,0 +1,114 @@
+"""Check that there is enough disk space in predefined paths."""
+
+import os.path
+import tempfile
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+class DiskAvailability(OpenShiftCheck):
+ """Check that recommended disk space is available before a first-time install."""
+
+ name = "disk_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_disk_space_bytes = {
+ '/var': {
+ 'masters': 40 * 10**9,
+ 'nodes': 15 * 10**9,
+ 'etcd': 20 * 10**9,
+ },
+ # Used to copy client binaries into,
+ # see roles/openshift_cli/library/openshift_container_binary_sync.py.
+ '/usr/local/bin': {
+ 'masters': 1 * 10**9,
+ 'nodes': 1 * 10**9,
+ 'etcd': 1 * 10**9,
+ },
+ # Used as temporary storage in several cases.
+ tempfile.gettempdir(): {
+ 'masters': 1 * 10**9,
+ 'nodes': 1 * 10**9,
+ 'etcd': 1 * 10**9,
+ },
+ }
+
+ def is_active(self):
+ """Skip hosts that do not have recommended disk space requirements."""
+ group_names = self.get_var("group_names", default=[])
+ active_groups = set()
+ for recommendation in self.recommended_disk_space_bytes.values():
+ active_groups.update(recommendation.keys())
+ has_disk_space_recommendation = bool(active_groups.intersection(group_names))
+ return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
+
+ def run(self):
+ group_names = self.get_var("group_names")
+ ansible_mounts = self.get_var("ansible_mounts")
+ ansible_mounts = {mount['mount']: mount for mount in ansible_mounts}
+
+ user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
+ try:
+ # For backwards-compatibility, if openshift_check_min_host_disk_gb
+ # is a number, then it overrides the required config for '/var'.
+ number = float(user_config)
+ user_config = {
+ '/var': {
+ 'masters': number,
+ 'nodes': number,
+ 'etcd': number,
+ },
+ }
+ except TypeError:
+ # If it is not a number, then it should be a nested dict.
+ pass
+
+ # TODO: as suggested in
+ # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
+ # maybe we could support checking disk availability in paths that are
+ # not part of the official recommendation but present in the user
+ # configuration.
+ for path, recommendation in self.recommended_disk_space_bytes.items():
+ free_bytes = self.free_bytes(path, ansible_mounts)
+ recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
+
+ config = user_config.get(path, {})
+ # NOTE: the user config is in GB, but we compare bytes, thus the
+ # conversion.
+ config_bytes = max(config.get(name, 0) for name in group_names) * 10**9
+ recommended_bytes = config_bytes or recommended_bytes
+
+ if free_bytes < recommended_bytes:
+ free_gb = float(free_bytes) / 10**9
+ recommended_gb = float(recommended_bytes) / 10**9
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available disk space in "{}" ({:.1f} GB) '
+ 'is below minimum recommended ({:.1f} GB)'
+ ).format(path, free_gb, recommended_gb)
+ }
+
+ return {}
+
+ @staticmethod
+ def free_bytes(path, ansible_mounts):
+ """Return the size available in path based on ansible_mounts."""
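+        # Illustrative example (not part of the original change): for the path
+        # "/usr/local/bin", walk up through "/usr/local" and "/usr" until a mount
+        # point known to ansible_mounts (often "/") is reached.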
+ mount_point = path
+        # arbitrary value to prevent an infinite loop, in the unlikely case that '/'
+ # is not in ansible_mounts.
+ max_depth = 32
+ while mount_point not in ansible_mounts and max_depth > 0:
+ mount_point = os.path.dirname(mount_point)
+ max_depth -= 1
+
+ try:
+ free_bytes = ansible_mounts[mount_point]['size_available']
+ except KeyError:
+ known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(ansible_mounts)) or 'none'
+ msg = 'Unable to determine disk availability for "{}". Known mount points: {}.'
+ raise OpenShiftCheckException(msg.format(path, known_mounts))
+
+ return free_bytes
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index cce289b95..77180223e 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,179 +1,178 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+"""Check that required Docker images are available."""
+from openshift_checks import OpenShiftCheck
+from openshift_checks.mixins import DockerHostMixin
-class DockerImageAvailability(OpenShiftCheck):
+
+NODE_IMAGE_SUFFIXES = ["haproxy-router", "docker-registry", "deployer", "pod"]
+DEPLOYMENT_IMAGE_INFO = {
+ "origin": {
+ "namespace": "openshift",
+ "name": "origin",
+ "registry_console_image": "cockpit/kubernetes",
+ },
+ "openshift-enterprise": {
+ "namespace": "openshift3",
+ "name": "ose",
+ "registry_console_image": "registry.access.redhat.com/openshift3/registry-console",
+ },
+}
+
+
+class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
- This check attempts to ensure that required docker images are
- either present locally, or able to be pulled down from available
- registries defined in a host machine.
+ Determine docker images that an install would require and check that they
+ are either present in the host's docker index, or available for the host to pull
+ with known registries as defined in our inventory file (or defaults).
"""
name = "docker_image_availability"
tags = ["preflight"]
+ # we use python-docker-py to check local docker for images, and skopeo
+ # to look for images available remotely without waiting to pull them.
+ dependencies = ["python-docker-py", "skopeo"]
- skopeo_image = "openshift/openshift-ansible"
-
- # FIXME(juanvallejo): we should consider other possible values of
- # `deployment_type` (the key here). See
- # https://github.com/openshift/openshift-ansible/blob/8e26f8c/roles/openshift_repos/vars/main.yml#L7
- docker_image_base = {
- "origin": {
- "repo": "openshift",
- "image": "origin",
- },
- "openshift-enterprise": {
- "repo": "openshift3",
- "image": "ose",
- },
- }
-
- def run(self, tmp, task_vars):
- required_images = self.required_images(task_vars)
- missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+ def is_active(self):
+ """Skip hosts with unsupported deployment types."""
+ deployment_type = self.get_var("openshift_deployment_type")
+ has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
- # exit early if all images were found locally
- if not missing_images:
- return {"changed": False}
-
- msg, failed, changed = self.update_skopeo_image(task_vars)
+ return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type
- # exit early if Skopeo update fails
+ def run(self):
+ msg, failed, changed = self.ensure_dependencies()
if failed:
return {
"failed": True,
"changed": changed,
- "msg": "Failed to update Skopeo image ({img_name}). {msg}".format(img_name=self.skopeo_image, msg=msg),
+ "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
- registries = self.known_docker_registries(task_vars)
- available_images = self.available_images(missing_images, registries, task_vars)
+ required_images = self.required_images()
+ missing_images = set(required_images) - set(self.local_images(required_images))
+
+ # exit early if all images were found locally
+ if not missing_images:
+ return {"changed": changed}
+
+ registries = self.known_docker_registries()
+ if not registries:
+ return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
+
+ available_images = self.available_images(missing_images, registries)
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
return {
"failed": True,
"msg": (
- "One or more required images are not available: {}.\n"
+ "One or more required Docker images are not available:\n {}\n"
"Configured registries: {}"
- ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)),
+ ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
"changed": changed,
}
return {"changed": changed}
- def required_images(self, task_vars):
- deployment_type = get_var(task_vars, "deployment_type")
- # FIXME(juanvallejo): we should handle gracefully with a proper error
- # message when given an unexpected value for `deployment_type`.
- image_base_name = self.docker_image_base[deployment_type]
-
- openshift_release = get_var(task_vars, "openshift_release")
- # FIXME(juanvallejo): this variable is not required when the
- # installation is non-containerized. The example inventories have it
- # commented out. We should handle gracefully and with a proper error
- # message when this variable is required and not set.
- openshift_image_tag = get_var(task_vars, "openshift_image_tag")
-
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
-
- if is_containerized:
- images = set(self.containerized_docker_images(image_base_name, openshift_release))
- else:
- images = set(self.rpm_docker_images(image_base_name, openshift_release))
-
- # append images with qualified image tags to our list of required images.
- # these are images with a (v0.0.0.0) tag, rather than a standard release
- # format tag (v0.0). We want to check this set in both containerized and
- # non-containerized installations.
- images.update(
- self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag)
- )
-
- return images
-
- def local_images(self, images, task_vars):
+ def required_images(self):
+ """
+ Determine which images we expect to need for this host.
+ Returns: a set of required images like 'openshift/origin:v3.6'
+
+ The thorny issue of determining the image names from the variables is under consideration
+ via https://github.com/openshift/openshift-ansible/issues/4415
+
+ For now we operate as follows:
+ * For containerized components (master, node, ...) we look at the deployment type and
+ use openshift/origin or openshift3/ose as the base for those component images. The
+ version is openshift_image_tag as determined by the openshift_version role.
+ * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if
+ it is defined; otherwise we again use the base that depends on the deployment type.
+ Registry is not included in constructed images. It may be in oreg_url or etcd image.
+ """
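+        # Illustrative example (assumed values, oreg_url unset): an origin node
+        # host with openshift_image_tag "v3.6.0" requires images such as
+        # "openshift/origin-haproxy-router:v3.6.0" as well as "cockpit/kubernetes".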
+ required = set()
+ deployment_type = self.get_var("openshift_deployment_type")
+ host_groups = self.get_var("group_names")
+ # containerized etcd may not have openshift_image_tag, see bz 1466622
+ image_tag = self.get_var("openshift_image_tag", default="latest")
+ image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
+ if not image_info:
+ return required
+
+ # template for images that run on top of OpenShift
+ image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
+ image_url = self.get_var("oreg_url", default="") or image_url
+ if 'nodes' in host_groups:
+ for suffix in NODE_IMAGE_SUFFIXES:
+ required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
+ # The registry-console is for some reason not prefixed with ose- like the other components.
+ # Nor is it versioned the same, so just look for latest.
+ # Also a completely different name is used for Origin.
+ required.add(image_info["registry_console_image"])
+
+ # images for containerized components
+ if self.get_var("openshift", "common", "is_containerized"):
+ components = set()
+ if 'nodes' in host_groups:
+ components.update(["node", "openvswitch"])
+ if 'masters' in host_groups: # name is "origin" or "ose"
+ components.add(image_info["name"])
+ for component in components:
+ required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
+ if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise
+ required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag
+
+ return required
+
+ def local_images(self, images):
"""Filter a list of images and return those available locally."""
return [
image for image in images
- if self.is_image_local(image, task_vars)
+ if self.is_image_local(image)
]
- def is_image_local(self, image, task_vars):
- result = self.module_executor("docker_image_facts", {"name": image}, task_vars)
+ def is_image_local(self, image):
+ """Check if image is already in local docker index."""
+ result = self.execute_module("docker_image_facts", {"name": image})
if result.get("failed", False):
return False
return bool(result.get("images", []))
- def known_docker_registries(self, task_vars):
- result = self.module_executor("docker_info", {}, task_vars)
+ def known_docker_registries(self):
+ """Build a list of docker registries available according to inventory vars."""
+ docker_facts = self.get_var("openshift", "docker")
+ regs = set(docker_facts["additional_registries"])
- if result.get("failed", False):
- return []
+ deployment_type = self.get_var("openshift_deployment_type")
+ if deployment_type == "origin":
+ regs.update(["docker.io"])
+ elif "enterprise" in deployment_type:
+ regs.update(["registry.access.redhat.com"])
- # FIXME(juanvallejo): wrong default type, result["info"] is expected to
- # contain a dictionary (see how we call `docker_info.get` below).
- docker_info = result.get("info", "")
- return [registry.get("Name", "") for registry in docker_info.get("Registries", {})]
+ return list(regs)
- def available_images(self, images, registries, task_vars):
- """Inspect existing images using Skopeo and return all images successfully inspected."""
+ def available_images(self, images, default_registries):
+ """Search remotely for images. Returns: list of images found."""
return [
image for image in images
- if self.is_image_available(image, registries, task_vars)
+ if self.is_available_skopeo_image(image, default_registries)
]
- def is_image_available(self, image, registries, task_vars):
+ def is_available_skopeo_image(self, image, default_registries):
+ """Use Skopeo to determine if required image exists in known registry(s)."""
+ registries = default_registries
+
+ # if image already includes a registry, only use that
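+        # e.g. (illustrative) "registry.example.com/openshift3/ose-pod:v3.6.0" is
+        # split so that only "registry.example.com" is queried for that image.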
+ if image.count("/") > 1:
+ registry, image = image.split("/", 1)
+ registries = [registry]
+
for registry in registries:
- if self.is_available_skopeo_image(image, registry, task_vars):
+ args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
+ result = self.execute_module("command", args)
+ if result.get("rc", 0) == 0 and not result.get("failed"):
return True
return False
-
- def is_available_skopeo_image(self, image, registry, task_vars):
- """Uses Skopeo to determine if required image exists in a given registry."""
-
- cmd_str = "skopeo inspect docker://{registry}/{image}".format(
- registry=registry,
- image=image,
- )
-
- args = {
- "name": "skopeo_inspect",
- "image": self.skopeo_image,
- "command": cmd_str,
- "detach": False,
- "cleanup": True,
- }
- result = self.module_executor("docker_container", args, task_vars)
- return result.get("failed", False)
-
- def containerized_docker_images(self, base_name, version):
- return [
- "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version)
- ]
-
- @staticmethod
- def rpm_docker_images(base, version):
- return [
- "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version)
- ]
-
- @staticmethod
- def qualified_docker_images(image_name, version):
- return [
- "{}-{}:{}".format(image_name, component, version)
- for component in "haproxy-router docker-registry deployer pod".split()
- ]
-
- @staticmethod
- def image_from_base_name(base):
- return "".join([base["repo"], "/", base["image"]])
-
- # ensures that the skopeo docker image exists, and updates it
- # with latest if image was already present locally.
- def update_skopeo_image(self, task_vars):
- result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars)
- return result.get("msg", ""), result.get("failed", False), result.get("changed", False)
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
new file mode 100644
index 000000000..dea15a56e
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -0,0 +1,298 @@
+"""Check Docker storage driver and usage."""
+import json
+import os.path
+import re
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks.mixins import DockerHostMixin
+
+
+class DockerStorage(DockerHostMixin, OpenShiftCheck):
+ """Check Docker storage driver compatibility.
+
+ This check ensures that Docker is using a supported storage driver,
+ and that loopback is not being used (if using devicemapper).
+ Also that storage usage is not above threshold.
+ """
+
+ name = "docker_storage"
+ tags = ["pre-install", "health", "preflight"]
+
+ dependencies = ["python-docker-py"]
+ storage_drivers = ["devicemapper", "overlay", "overlay2"]
+ max_thinpool_data_usage_percent = 90.0
+ max_thinpool_meta_usage_percent = 90.0
+ max_overlay_usage_percent = 90.0
+
+ # TODO(lmeyer): mention these in the output when check fails
+ configuration_variables = [
+ (
+ "max_thinpool_data_usage_percent",
+ "For 'devicemapper' storage driver, usage threshold percentage for data. "
+ "Format: float. Default: {:.1f}".format(max_thinpool_data_usage_percent),
+ ),
+ (
+ "max_thinpool_meta_usage_percent",
+ "For 'devicemapper' storage driver, usage threshold percentage for metadata. "
+ "Format: float. Default: {:.1f}".format(max_thinpool_meta_usage_percent),
+ ),
+ (
+ "max_overlay_usage_percent",
+ "For 'overlay' or 'overlay2' storage driver, usage threshold percentage. "
+ "Format: float. Default: {:.1f}".format(max_overlay_usage_percent),
+ ),
+ ]
+
+ def run(self):
+ msg, failed, changed = self.ensure_dependencies()
+ if failed:
+ return {
+ "failed": True,
+ "changed": changed,
+ "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
+ }
+
+ # attempt to get the docker info hash from the API
+ docker_info = self.execute_module("docker_info", {})
+ if docker_info.get("failed"):
+ return {"failed": True, "changed": changed,
+ "msg": "Failed to query Docker API. Is docker running on this host?"}
+ if not docker_info.get("info"): # this would be very strange
+ return {"failed": True, "changed": changed,
+ "msg": "Docker API query missing info:\n{}".format(json.dumps(docker_info))}
+ docker_info = docker_info["info"]
+
+ # check if the storage driver we saw is valid
+ driver = docker_info.get("Driver", "[NONE]")
+ if driver not in self.storage_drivers:
+ msg = (
+ "Detected unsupported Docker storage driver '{driver}'.\n"
+ "Supported storage drivers are: {drivers}"
+ ).format(driver=driver, drivers=', '.join(self.storage_drivers))
+ return {"failed": True, "changed": changed, "msg": msg}
+
+ # driver status info is a list of tuples; convert to dict and validate based on driver
+ driver_status = {item[0]: item[1] for item in docker_info.get("DriverStatus", [])}
+
+ result = {}
+
+ if driver == "devicemapper":
+ result = self.check_devicemapper_support(driver_status)
+
+ if driver in ['overlay', 'overlay2']:
+ result = self.check_overlay_support(docker_info, driver_status)
+
+ result['changed'] = result.get('changed', False) or changed
+ return result
+
+ def check_devicemapper_support(self, driver_status):
+ """Check if dm storage driver is supported as configured. Return: result dict."""
+ if driver_status.get("Data loop file"):
+ msg = (
+ "Use of loopback devices with the Docker devicemapper storage driver\n"
+ "(the default storage configuration) is unsupported in production.\n"
+ "Please use docker-storage-setup to configure a backing storage volume.\n"
+ "See http://red.ht/2rNperO for further information."
+ )
+ return {"failed": True, "msg": msg}
+ result = self.check_dm_usage(driver_status)
+ return result
+
+ def check_dm_usage(self, driver_status):
+ """Check usage thresholds for Docker dm storage driver. Return: result dict.
+ Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
+ implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
+ devicemapper storage. The LV is "thin" because it does not use all available storage
+ from its VG, instead expanding as needed; so to determine available space, we gather
+ current usage as the Docker API reports for the driver as well as space available for
+ expansion in the pool's VG.
+ Usage within the LV is divided into pools allocated to data and metadata, either of which
+ could run out of space first; so we check both.
+ """
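+        # Worked example (assumed numbers, not from the original change): with 8 GiB
+        # of data used, a 10 GiB data pool, and 10 GiB free in the VG, data usage is
+        # 100 * 8 / (10 + 10) = 40%, compared against max_thinpool_data_usage_percent.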
+ vals = dict(
+ vg_free=self.get_vg_free(driver_status.get("Pool Name")),
+ data_used=driver_status.get("Data Space Used"),
+ data_total=driver_status.get("Data Space Total"),
+ metadata_used=driver_status.get("Metadata Space Used"),
+ metadata_total=driver_status.get("Metadata Space Total"),
+ )
+
+ # convert all human-readable strings to bytes
+ for key, value in vals.copy().items():
+ try:
+ vals[key + "_bytes"] = self.convert_to_bytes(value)
+ except ValueError as err: # unlikely to hit this from API info, but just to be safe
+ return {
+ "failed": True,
+ "values": vals,
+ "msg": "Could not interpret {} value '{}' as bytes: {}".format(key, value, str(err))
+ }
+
+ # determine the threshold percentages which usage should not exceed
+ for name, default in [("data", self.max_thinpool_data_usage_percent),
+ ("metadata", self.max_thinpool_meta_usage_percent)]:
+ percent = self.get_var("max_thinpool_" + name + "_usage_percent", default=default)
+ try:
+ vals[name + "_threshold"] = float(percent)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified thinpool {} usage limit '{}' is not a percentage".format(name, percent)
+ }
+
+ # test whether the thresholds are exceeded
+ messages = []
+ for name in ["data", "metadata"]:
+ vals[name + "_pct_used"] = 100 * vals[name + "_used_bytes"] / (
+ vals[name + "_total_bytes"] + vals["vg_free_bytes"])
+ if vals[name + "_pct_used"] > vals[name + "_threshold"]:
+ messages.append(
+ "Docker thinpool {name} usage percentage {pct:.1f} "
+ "is higher than threshold {thresh:.1f}.".format(
+ name=name,
+ pct=vals[name + "_pct_used"],
+ thresh=vals[name + "_threshold"],
+ ))
+ vals["failed"] = True
+
+ vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
+ return vals
+
+ def get_vg_free(self, pool):
+ """Determine which VG to examine according to the pool name. Return: size vgs reports.
+ Pool name is the only indicator currently available from the Docker API driver info.
+ We assume a name that looks like "vg--name-docker--pool";
+ vg and lv names with inner hyphens doubled, joined by a hyphen.
+ """
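+        # Illustrative example (not part of the original change): a pool named
+        # "docker--vg-docker--pool" matches "docker--vg", which un-escapes to the
+        # VG name "docker-vg".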
+ match = re.match(r'((?:[^-]|--)+)-(?!-)', pool) # matches up to the first single hyphen
+ if not match: # unlikely, but... be clear if we assumed wrong
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{}'.\n"
+ "However this name does not have the expected format of 'vgname-lvname'\n"
+ "so the available storage in the VG cannot be determined.".format(pool)
+ )
+ vg_name = match.groups()[0].replace("--", "-")
+ vgs_cmd = "/sbin/vgs --noheadings -o vg_free --units g --select vg_name=" + vg_name
+ # should return free space like " 12.00g" if the VG exists; empty if it does not
+
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd})
+ if ret.get("failed") or ret.get("rc", 0) != 0:
+ raise OpenShiftCheckException(
+ "Is LVM installed? Failed to run /sbin/vgs "
+ "to determine docker storage usage:\n" + ret.get("msg", "")
+ )
+ size = ret.get("stdout", "").strip()
+ if not size:
+ raise OpenShiftCheckException(
+                "This host's Docker reports it is using a storage pool named '{pool}',\n"
+                "which we expect to come from local VG '{vg}'.\n"
+                "However, /sbin/vgs did not find this VG. Is Docker for this host\n"
+                "running and using the storage on the host?".format(pool=pool, vg=vg_name)
+ )
+ return size
+
+ @staticmethod
+ def convert_to_bytes(string):
+ """Convert string like "10.3 G" to bytes (binary units assumed). Return: float bytes."""
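+        # e.g. (illustrative) "10.3 G" -> 10.3 * 1024**3 bytes; a bare number with no
+        # unit is treated as bytes.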
+ units = dict(
+ b=1,
+ k=1024,
+ m=1024**2,
+ g=1024**3,
+ t=1024**4,
+ p=1024**5,
+ )
+ string = string or ""
+ match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string) # float followed by optional unit
+ if not match:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ number, unit = match.groups()
+ multiplier = 1 if not unit else units.get(unit.lower())
+ if not multiplier:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ return float(number) * multiplier
+
+ def check_overlay_support(self, docker_info, driver_status):
+ """Check if overlay storage driver is supported for this host. Return: result dict."""
+ # check for xfs as backing store
+ backing_fs = driver_status.get("Backing Filesystem", "[NONE]")
+ if backing_fs != "xfs":
+ msg = (
+ "Docker storage drivers 'overlay' and 'overlay2' are only supported with\n"
+ "'xfs' as the backing storage, but this host's storage is type '{fs}'."
+ ).format(fs=backing_fs)
+ return {"failed": True, "msg": msg}
+
+ # check support for OS and kernel version
+ o_s = docker_info.get("OperatingSystem", "[NONE]")
+ if "Red Hat Enterprise Linux" in o_s or "CentOS" in o_s:
+ # keep it simple, only check enterprise kernel versions; assume everyone else is good
+ kernel = docker_info.get("KernelVersion", "[NONE]")
+ kernel_arr = [int(num) for num in re.findall(r'\d+', kernel)]
+ if kernel_arr < [3, 10, 0, 514]: # rhel < 7.3
+ msg = (
+ "Docker storage drivers 'overlay' and 'overlay2' are only supported beginning with\n"
+ "kernel version 3.10.0-514; but Docker reports kernel version {version}."
+ ).format(version=kernel)
+ return {"failed": True, "msg": msg}
+ # NOTE: we could check for --selinux-enabled here but docker won't even start with
+ # that option until it's supported in the kernel so we don't need to.
+
+ return self.check_overlay_usage(docker_info)
+
+ def check_overlay_usage(self, docker_info):
+ """Check disk usage on OverlayFS backing store volume. Return: result dict."""
+ path = docker_info.get("DockerRootDir", "/var/lib/docker") + "/" + docker_info["Driver"]
+
+ threshold = self.get_var("max_overlay_usage_percent", default=self.max_overlay_usage_percent)
+ try:
+ threshold = float(threshold)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified 'max_overlay_usage_percent' is not a percentage: {}".format(threshold),
+ }
+
+ mount = self.find_ansible_mount(path, self.get_var("ansible_mounts"))
+ try:
+ free_bytes = mount['size_available']
+ total_bytes = mount['size_total']
+ usage = 100.0 * (total_bytes - free_bytes) / total_bytes
+ except (KeyError, ZeroDivisionError):
+ return {
+ "failed": True,
+ "msg": "The ansible_mount found for path {} is invalid.\n"
+ "This is likely to be an Ansible bug. The record was:\n"
+ "{}".format(path, json.dumps(mount, indent=2)),
+ }
+
+ if usage > threshold:
+ return {
+ "failed": True,
+ "msg": (
+ "For Docker OverlayFS mount point {path},\n"
+ "usage percentage {pct:.1f} is higher than threshold {thresh:.1f}."
+ ).format(path=mount["mount"], pct=usage, thresh=threshold)
+ }
+
+ return {}
+
+ # TODO(lmeyer): migrate to base class
+ @staticmethod
+ def find_ansible_mount(path, ansible_mounts):
+ """Return the mount point for path from ansible_mounts."""
+
+ mount_for_path = {mount['mount']: mount for mount in ansible_mounts}
+ mount_point = path
+ while mount_point not in mount_for_path:
+ if mount_point in ["/", ""]: # "/" not in ansible_mounts???
+ break
+ mount_point = os.path.dirname(mount_point)
+
+ try:
+ return mount_for_path[mount_point]
+ except KeyError:
+ known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path)) or 'none'
+ msg = 'Unable to determine mount point for path "{}". Known mount points: {}.'
+ raise OpenShiftCheckException(msg.format(path, known_mounts))
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
new file mode 100644
index 000000000..28c38504d
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
@@ -0,0 +1,85 @@
+"""
+Ansible module for determining if the size of OpenShift image data exceeds a specified limit in an etcd cluster.
+"""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+class EtcdImageDataSize(OpenShiftCheck):
+ """Check that total size of OpenShift image data does not exceed the recommended limit in an etcd cluster"""
+
+ name = "etcd_imagedata_size"
+ tags = ["etcd"]
+
+ def run(self):
+ etcd_mountpath = self._get_etcd_mountpath(self.get_var("ansible_mounts"))
+ etcd_avail_diskspace = etcd_mountpath["size_available"]
+ etcd_total_diskspace = etcd_mountpath["size_total"]
+
+ etcd_imagedata_size_limit = self.get_var(
+ "etcd_max_image_data_size_bytes",
+ default=int(0.5 * float(etcd_total_diskspace - etcd_avail_diskspace))
+ )
+
+ etcd_is_ssl = self.get_var("openshift", "master", "etcd_use_ssl", default=False)
+ etcd_port = self.get_var("openshift", "master", "etcd_port", default=2379)
+ etcd_hosts = self.get_var("openshift", "master", "etcd_hosts")
+
+ config_base = self.get_var("openshift", "common", "config_base")
+
+ cert = self.get_var("etcd_client_cert", default=config_base + "/master/master.etcd-client.crt")
+ key = self.get_var("etcd_client_key", default=config_base + "/master/master.etcd-client.key")
+ ca_cert = self.get_var("etcd_client_ca_cert", default=config_base + "/master/master.etcd-ca.crt")
+
+ for etcd_host in list(etcd_hosts):
+ args = {
+ "size_limit_bytes": etcd_imagedata_size_limit,
+ "paths": ["/openshift.io/images", "/openshift.io/imagestreams"],
+ "host": etcd_host,
+ "port": etcd_port,
+ "protocol": "https" if etcd_is_ssl else "http",
+ "version_prefix": "/v2",
+ "allow_redirect": True,
+ "ca_cert": ca_cert,
+ "cert": {
+ "cert": cert,
+ "key": key,
+ },
+ }
+
+ etcdkeysize = self.execute_module("etcdkeysize", args)
+
+ if etcdkeysize.get("rc", 0) != 0 or etcdkeysize.get("failed"):
+ msg = 'Failed to retrieve stats for etcd host "{host}": {reason}'
+ reason = etcdkeysize.get("msg")
+ if etcdkeysize.get("module_stderr"):
+ reason = etcdkeysize["module_stderr"]
+
+ msg = msg.format(host=etcd_host, reason=reason)
+ return {"failed": True, "changed": False, "msg": msg}
+
+ if etcdkeysize["size_limit_exceeded"]:
+ limit = self._to_gigabytes(etcd_imagedata_size_limit)
+ msg = ("The size of OpenShift image data stored in etcd host "
+ "\"{host}\" exceeds the maximum recommended limit of {limit:.2f} GB. "
+ "Use the `oadm prune images` command to cleanup unused Docker images.")
+ return {"failed": True, "msg": msg.format(host=etcd_host, limit=limit)}
+
+ return {"changed": False}
+
+ @staticmethod
+ def _get_etcd_mountpath(ansible_mounts):
+ valid_etcd_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
+
+ mount_for_path = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+ for path in valid_etcd_mount_paths:
+ if path in mount_for_path:
+ return mount_for_path[path]
+
+ paths = ', '.join(sorted(mount_for_path)) or 'none'
+ msg = "Unable to determine a valid etcd mountpath. Paths mounted: {}.".format(paths)
+ raise OpenShiftCheckException(msg)
+
+ @staticmethod
+ def _to_gigabytes(byte_size):
+ return float(byte_size) / 10.0**9
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
new file mode 100644
index 000000000..cc1b14d8a
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -0,0 +1,44 @@
+"""Check that scans journalctl for log messages that are symptomatic of increased etcd traffic."""
+
+from openshift_checks import OpenShiftCheck
+
+
+class EtcdTraffic(OpenShiftCheck):
+ """Check if host is being affected by an increase in etcd traffic."""
+
+ name = "etcd_traffic"
+ tags = ["health", "etcd"]
+
+ def is_active(self):
+ """Skip hosts that do not have etcd in their group names."""
+ group_names = self.get_var("group_names", default=[])
+ valid_group_names = "etcd" in group_names
+
+ version = self.get_var("openshift", "common", "short_version")
+ valid_version = version in ("3.4", "3.5", "1.4", "1.5")
+
+ return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
+
+ def run(self):
+ is_containerized = self.get_var("openshift", "common", "is_containerized")
+ unit = "etcd_container" if is_containerized else "etcd"
+
+ log_matchers = [{
+ "start_regexp": r"Starting Etcd Server",
+ "regexp": r"etcd: sync duration of [^,]+, expected less than 1s",
+ "unit": unit
+ }]
+
+ match = self.execute_module("search_journalctl", {"log_matchers": log_matchers})
+
+ if match.get("matched"):
+ msg = ("Higher than normal etcd traffic detected.\n"
+ "OpenShift 3.4 introduced an increase in etcd traffic.\n"
+ "Upgrading to OpenShift 3.6 is recommended in order to fix this issue.\n"
+ "Please refer to https://access.redhat.com/solutions/2916381 for more information.")
+ return {"failed": True, "msg": msg}
+
+ if match.get("failed"):
+ return {"failed": True, "msg": "\n".join(match.get("errors"))}
+
+ return {}
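
As an illustration of the symptom this check scans for, a short sketch matching the same "sync duration" pattern against a journal line; the sample line is invented:

    import re

    SYNC_RE = re.compile(r"etcd: sync duration of [^,]+, expected less than 1s")

    # Hypothetical journald line resembling the high-traffic symptom.
    line = ("Jul 10 12:00:00 master etcd[1234]: "
            "etcd: sync duration of 2.3s, expected less than 1s")
    print(bool(SYNC_RE.search(line)))  # -> True
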
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
new file mode 100644
index 000000000..da7d0364a
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -0,0 +1,55 @@
+"""A health check for OpenShift clusters."""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+class EtcdVolume(OpenShiftCheck):
+ """Ensures etcd storage usage does not exceed a given threshold."""
+
+ name = "etcd_volume"
+ tags = ["etcd", "health"]
+
+ # Default device usage threshold. Value should be in the range [0, 100].
+ default_threshold_percent = 90
+ # Where to find etcd data, higher priority first.
+ supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
+
+ def is_active(self):
+ etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
+ is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+ return super(EtcdVolume, self).is_active() and is_etcd_host
+
+ def run(self):
+ mount_info = self._etcd_mount_info()
+ available = mount_info["size_available"]
+ total = mount_info["size_total"]
+ used = total - available
+
+ threshold = self.get_var(
+ "etcd_device_usage_threshold_percent",
+ default=self.default_threshold_percent
+ )
+
+ used_percent = 100.0 * used / total
+
+ if used_percent > threshold:
+ device = mount_info.get("device", "unknown")
+ mount = mount_info.get("mount", "unknown")
+ msg = "etcd storage usage ({:.1f}%) is above threshold ({:.1f}%). Device: {}, mount: {}.".format(
+ used_percent, threshold, device, mount
+ )
+ return {"failed": True, "msg": msg}
+
+ return {"changed": False}
+
+ def _etcd_mount_info(self):
+ ansible_mounts = self.get_var("ansible_mounts")
+ mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+
+ for path in self.supported_mount_paths:
+ if path in mounts:
+ return mounts[path]
+
+ paths = ', '.join(sorted(mounts)) or 'none'
+ msg = "Unable to find etcd storage mount point. Paths mounted: {}.".format(paths)
+ raise OpenShiftCheckException(msg)
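
The usage comparison above, reduced to a standalone sketch; the mount numbers are made up, and the threshold default mirrors the etcd_device_usage_threshold_percent variable read by the check:

    def volume_usage_exceeded(mount_info, threshold_percent=90):
        used = mount_info["size_total"] - mount_info["size_available"]
        used_percent = 100.0 * used / mount_info["size_total"]
        return used_percent > threshold_percent, used_percent

    # Hypothetical mount facts: 46 GB used out of 50 GB -> 92% > 90% threshold.
    mount = {"size_total": 50 * 10**9, "size_available": 4 * 10**9}
    print(volume_usage_exceeded(mount))  # -> (True, 92.0)
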
diff --git a/roles/openshift_health_checker/openshift_checks/logging/__init__.py b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
diff --git a/roles/openshift_health_checker/openshift_checks/logging/curator.py b/roles/openshift_health_checker/openshift_checks/logging/curator.py
new file mode 100644
index 000000000..f82ae64d7
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/curator.py
@@ -0,0 +1,54 @@
+"""Check for an aggregated logging Curator deployment"""
+
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Curator(LoggingCheck):
+ """Check for an aggregated logging Curator deployment"""
+
+ name = "curator"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self):
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ curator_pods, error = super(Curator, self).get_pods_for_component(
+ self.logging_namespace,
+ "curator",
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_curator(curator_pods)
+
+ if check_error:
+ msg = ("The following Curator deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Curator deployment.'}
+
+ def check_curator(self, pods):
+ """Check to see if curator is up and working. Returns: error string"""
+ if not pods:
+ return (
+ "There are no Curator pods for the logging stack,\n"
+ "so nothing will prune Elasticsearch indexes.\n"
+ "Is Curator correctly deployed?"
+ )
+
+ not_running = super(Curator, self).not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return (
+ "The Curator pod is not currently in a running state,\n"
+ "so Elasticsearch indexes may increase without bound."
+ )
+ if len(pods) - len(not_running) > 1:
+ return (
+ "There is more than one Curator pod running. This should not normally happen.\n"
+ "Although this doesn't cause any problems, you may want to investigate."
+ )
+
+ return None
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
new file mode 100644
index 000000000..1e478c04d
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -0,0 +1,210 @@
+"""Check for an aggregated logging Elasticsearch deployment"""
+
+import json
+import re
+
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Elasticsearch(LoggingCheck):
+ """Check for an aggregated logging Elasticsearch deployment"""
+
+ name = "elasticsearch"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ es_pods, error = super(Elasticsearch, self).get_pods_for_component(
+ self.logging_namespace,
+ "es",
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_elasticsearch(es_pods)
+
+ if check_error:
+ msg = ("The following Elasticsearch deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
+
+ def _not_running_elasticsearch_pods(self, es_pods):
+ """Returns: list of pods that are not running, list of errors about non-running pods"""
+ not_running = super(Elasticsearch, self).not_running_pods(es_pods)
+ if not_running:
+ return not_running, [(
+ 'The following Elasticsearch pods are not running:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))]
+ return not_running, []
+
+ def check_elasticsearch(self, es_pods):
+ """Various checks for elasticsearch. Returns: error string"""
+ not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
+ running_pods = [pod for pod in es_pods if pod not in not_running_pods]
+ pods_by_name = {
+ pod['metadata']['name']: pod for pod in running_pods
+ # Filter out pods that are not members of a DC
+ if pod['metadata'].get('labels', {}).get('deploymentconfig')
+ }
+ if not pods_by_name:
+ return 'No logging Elasticsearch pods were found. Is logging deployed?'
+ error_msgs += self._check_elasticsearch_masters(pods_by_name)
+ error_msgs += self._check_elasticsearch_node_list(pods_by_name)
+ error_msgs += self._check_es_cluster_health(pods_by_name)
+ error_msgs += self._check_elasticsearch_diskspace(pods_by_name)
+ return '\n'.join(error_msgs)
+
+ @staticmethod
+ def _build_es_curl_cmd(pod_name, url):
+ base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
+ return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
+
+ def _check_elasticsearch_masters(self, pods_by_name):
+ """Check that Elasticsearch masters are sane. Returns: list of error strings"""
+ es_master_names = set()
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ # Compare what each ES node reports as master and compare for split brain
+ get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
+ master_name_str = self._exec_oc(get_master_cmd, [])
+ master_names = (master_name_str or '').split(' ')
+ if len(master_names) > 1:
+ es_master_names.add(master_names[1])
+ else:
+ error_msgs.append(
+ 'No master? Elasticsearch {pod} returned bad string when asked master name:\n'
+ ' {response}'.format(pod=pod_name, response=master_name_str)
+ )
+
+ if not es_master_names:
+ error_msgs.append('No logging Elasticsearch masters were found. Is logging deployed?')
+ return error_msgs
+
+ if len(es_master_names) > 1:
+ error_msgs.append(
+ 'Found multiple Elasticsearch masters according to the pods:\n'
+ '{master_list}\n'
+ 'This implies that the masters have "split brain" and are not correctly\n'
+ 'replicating data for the logging cluster. Log loss is likely to occur.'
+ .format(master_list='\n'.join(' ' + master for master in es_master_names))
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_node_list(self, pods_by_name):
+ """Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
+
+ if not pods_by_name:
+ return ['No logging Elasticsearch masters were found. Is logging deployed?']
+
+ # get ES cluster nodes
+ node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
+ cluster_node_data = self._exec_oc(node_cmd, [])
+ try:
+ cluster_nodes = json.loads(cluster_node_data)['nodes']
+ except (ValueError, KeyError):
+ return [
+ 'Failed to query Elasticsearch for the list of ES nodes. The output was:\n' +
+ cluster_node_data
+ ]
+
+ # Try to match all ES-reported node hosts to known pods.
+ error_msgs = []
+ for node in cluster_nodes.values():
+ # Note that with 1.4/3.4 the pod IP may be used as the master name
+ if not any(node['host'] in (pod_name, pod['status'].get('podIP'))
+ for pod_name, pod in pods_by_name.items()):
+ error_msgs.append(
+ 'The Elasticsearch cluster reports a member node "{node}"\n'
+ 'that does not correspond to any known ES pod.'.format(node=node['host'])
+ )
+
+ return error_msgs
+
+ def _check_es_cluster_health(self, pods_by_name):
+ """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
+ cluster_health_data = self._exec_oc(cluster_health_cmd, [])
+ try:
+ health_res = json.loads(cluster_health_data)
+ if not health_res or not health_res.get('status'):
+ raise ValueError()
+ except ValueError:
+ error_msgs.append(
+ 'Could not retrieve cluster health status from logging ES pod "{pod}".\n'
+ 'Response was:\n{output}'.format(pod=pod_name, output=cluster_health_data)
+ )
+ continue
+
+ if health_res['status'] not in ['green', 'yellow']:
+ error_msgs.append(
+ 'Elasticsearch cluster health status is RED according to pod "{}"'.format(pod_name)
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_diskspace(self, pods_by_name):
+ """
+ Exec into an ES pod and query the disk space on the persistent volume.
+ Returns: list of errors
+ """
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ disk_output = self._exec_oc(df_cmd, [])
+ lines = disk_output.splitlines()
+ # expecting one header looking like 'IUse% Use%' and one body line
+ body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
+ if len(lines) != 2 or len(lines[0].split()) != 2 or not re.match(body_re, lines[1]):
+ error_msgs.append(
+ 'Could not retrieve storage usage from logging ES pod "{pod}".\n'
+ 'Response to `df` command was:\n{output}'.format(pod=pod_name, output=disk_output)
+ )
+ continue
+ inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
+
+ inode_pct_thresh = self.get_var('openshift_check_efk_es_inode_pct', default='90')
+ if int(inode_pct) >= int(inode_pct_thresh):
+ error_msgs.append(
+ 'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
+ ' is {pct}, greater than threshold {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(inode_pct),
+ limit=str(inode_pct_thresh),
+ param='openshift_check_efk_es_inode_pct',
+ ))
+ disk_pct_thresh = self.get_var('openshift_check_efk_es_storage_pct', default='80')
+ if int(disk_pct) >= int(disk_pct_thresh):
+ error_msgs.append(
+ 'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
+ ' is {pct}, greater than threshold {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(disk_pct),
+ limit=str(disk_pct_thresh),
+ param='openshift_check_efk_es_storage_pct',
+ ))
+
+ return error_msgs
+
+ def _exec_oc(self, cmd_str, extra_args):
+ return super(Elasticsearch, self).exec_oc(
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ )
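
The df parsing in _check_elasticsearch_diskspace expects a two-line response from `df --output=ipcent,pcent`; a small sketch with a fabricated df output shows how the inode and disk percentages are extracted:

    import re

    BODY_RE = r'\s*(\d+)%?\s+(\d+)%?\s*$'

    # Hypothetical output of: df --output=ipcent,pcent /elasticsearch/persistent
    disk_output = "IUse% Use%\n   4%  71%\n"
    lines = disk_output.splitlines()
    inode_pct, disk_pct = re.match(BODY_RE, lines[1]).groups()
    print(inode_pct, disk_pct)  # -> 4 71
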
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
new file mode 100644
index 000000000..063e707a9
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
@@ -0,0 +1,167 @@
+"""Check for an aggregated logging Fluentd deployment"""
+
+import json
+
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Fluentd(LoggingCheck):
+ """Check for an aggregated logging Fluentd deployment"""
+
+ name = "fluentd"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
+ self.logging_namespace,
+ "fluentd",
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_fluentd(fluentd_pods)
+
+ if check_error:
+ msg = ("The following Fluentd deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
+
+ @staticmethod
+ def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
+ """Filter to all nodes with fluentd label. Returns dict(name: node), error string"""
+ label, value = node_selector.split('=', 1)
+ fluentd_nodes = {
+ name: node for name, node in nodes_by_name.items()
+ if node['metadata']['labels'].get(label) == value
+ }
+ if not fluentd_nodes:
+ return None, (
+ 'There are no nodes with the fluentd label {label}.\n'
+ 'This means no logs will be aggregated from the nodes.'
+ ).format(label=node_selector)
+ return fluentd_nodes, None
+
+ def _check_node_labeling(self, nodes_by_name, fluentd_nodes, node_selector):
+ """Note if nodes are not labeled as expected. Returns: error string"""
+ intended_nodes = self.get_var('openshift_logging_fluentd_hosts', default=['--all'])
+ if not intended_nodes or '--all' in intended_nodes:
+ intended_nodes = nodes_by_name.keys()
+ nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
+ if nodes_missing_labels:
+ return (
+ 'The following nodes are supposed to be labeled with {label} but are not:\n'
+ ' {nodes}\n'
+ 'Fluentd will not aggregate logs from these nodes.'
+ ).format(label=node_selector, nodes=', '.join(nodes_missing_labels))
+ return None
+
+ @staticmethod
+ def _check_nodes_have_fluentd(pods, fluentd_nodes):
+ """Make sure fluentd is on all the labeled nodes. Returns: error string"""
+ unmatched_nodes = fluentd_nodes.copy()
+ node_names_by_label = {
+ node['metadata']['labels']['kubernetes.io/hostname']: name
+ for name, node in fluentd_nodes.items()
+ }
+ node_names_by_internal_ip = {
+ address['address']: name
+ for name, node in fluentd_nodes.items()
+ for address in node['status']['addresses']
+ if address['type'] == "InternalIP"
+ }
+ for pod in pods:
+ for name in [
+ pod['spec']['nodeName'],
+ node_names_by_internal_ip.get(pod['spec']['nodeName']),
+ node_names_by_label.get(pod.get('spec', {}).get('host')),
+ ]:
+ unmatched_nodes.pop(name, None)
+ if unmatched_nodes:
+ return (
+ 'The following nodes are supposed to have a Fluentd pod but do not:\n'
+ '{nodes}'
+ 'These nodes will not have their logs aggregated.'
+ ).format(nodes=''.join(
+ " {}\n".format(name)
+ for name in unmatched_nodes.keys()
+ ))
+ return None
+
+ def _check_fluentd_pods_running(self, pods):
+ """Make sure all fluentd pods are running. Returns: error string"""
+ not_running = super(Fluentd, self).not_running_pods(pods)
+ if not_running:
+ return (
+ 'The following Fluentd pods are supposed to be running but are not:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))
+ return None
+
+ def check_fluentd(self, pods):
+ """Verify fluentd is running everywhere. Returns: error string"""
+
+ node_selector = self.get_var(
+ 'openshift_logging_fluentd_nodeselector',
+ default='logging-infra-fluentd=true'
+ )
+
+ nodes_by_name, error = self.get_nodes_by_name()
+
+ if error:
+ return error
+ fluentd_nodes, error = self._filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
+ if error:
+ return error
+
+ error_msgs = []
+ error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector)
+ if error:
+ error_msgs.append(error)
+ error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
+ if error:
+ error_msgs.append(error)
+ error = self._check_fluentd_pods_running(pods)
+ if error:
+ error_msgs.append(error)
+
+ # Make sure there are no extra fluentd pods
+ if len(pods) > len(fluentd_nodes):
+ error_msgs.append(
+ 'There are more Fluentd pods running than nodes labeled.\n'
+ 'This may not cause problems with logging but it likely indicates something wrong.'
+ )
+
+ return '\n'.join(error_msgs)
+
+ def get_nodes_by_name(self):
+ """Retrieve all the node definitions. Returns: dict(name: node), error string"""
+ nodes_json = self._exec_oc("get nodes -o json", [])
+ try:
+ nodes = json.loads(nodes_json)
+ except ValueError: # no valid json - should not happen
+ return None, "Could not obtain a list of nodes to validate fluentd. Output from oc get:\n" + nodes_json
+ if not nodes or not nodes.get('items'): # also should not happen
+ return None, "No nodes appear to be defined according to the API."
+ return {
+ node['metadata']['name']: node
+ for node in nodes['items']
+ }, None
+
+ def _exec_oc(self, cmd_str, extra_args):
+ return super(Fluentd, self).exec_oc(
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ )
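
A minimal sketch of the label-selector filtering done by _filter_fluentd_labeled_nodes, using invented node objects; only nodes carrying the fluentd label survive:

    def filter_labeled_nodes(nodes_by_name, node_selector="logging-infra-fluentd=true"):
        label, value = node_selector.split('=', 1)
        return {name: node for name, node in nodes_by_name.items()
                if node['metadata']['labels'].get(label) == value}

    # Hypothetical nodes: only node-1 carries the fluentd label.
    nodes = {
        "node-1": {"metadata": {"labels": {"logging-infra-fluentd": "true"}}},
        "node-2": {"metadata": {"labels": {}}},
    }
    print(sorted(filter_labeled_nodes(nodes)))  # -> ['node-1']
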
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
new file mode 100644
index 000000000..60f94e106
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -0,0 +1,226 @@
+"""
+Module for performing checks on a Kibana logging deployment
+"""
+
+import json
+import ssl
+
+try:
+ from urllib2 import HTTPError, URLError
+ import urllib2
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Kibana(LoggingCheck):
+ """Module that checks an integrated logging Kibana deployment"""
+
+ name = "kibana"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ kibana_pods, error = super(Kibana, self).get_pods_for_component(
+ self.logging_namespace,
+ "kibana",
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_kibana(kibana_pods)
+
+ if not check_error:
+ check_error = self._check_kibana_route()
+
+ if check_error:
+ msg = ("The following Kibana deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
+
+ def _verify_url_internal(self, url):
+ """
+ Try to reach a URL from the host.
+ Returns: None on success, or an error message describing the failure
+ """
+ args = dict(
+ url=url,
+ follow_redirects='none',
+ validate_certs='no', # likely to be signed with internal CA
+ # TODO(lmeyer): give users option to validate certs
+ status_code=302,
+ )
+ result = self.execute_module('uri', args)
+ if result.get('failed'):
+ return result['msg']
+ return None
+
+ @staticmethod
+ def _verify_url_external(url):
+ """
+ Try to reach a URL from the Ansible control host.
+ Returns: None on success, or an error message describing the failure
+ """
+ # This actually checks from the ansible control host, which may or may not
+ # really be "external" to the cluster.
+
+ # Disable SSL cert validation to work around internally signed certs
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False # or setting CERT_NONE is refused
+ ctx.verify_mode = ssl.CERT_NONE
+
+ # Verify that the url is returning a valid response
+ try:
+ # We only care if the url connects and responds
+ return_code = urllib2.urlopen(url, context=ctx).getcode()
+ except HTTPError as httperr:
+ return httperr.reason
+ except URLError as urlerr:
+ return str(urlerr)
+
+ # there appears to be no way to prevent urlopen from following redirects
+ if return_code != 200:
+ return 'Expected success (200) but got return code {}'.format(int(return_code))
+
+ return None
+
+ def check_kibana(self, pods):
+ """Check to see if Kibana is up and working. Returns: error string."""
+
+ if not pods:
+ return "There are no Kibana pods deployed, so no access to the logging UI."
+
+ not_running = self.not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return "No Kibana pod is in a running state, so there is no access to the logging UI."
+ elif not_running:
+ return (
+ "The following Kibana pods are not currently in a running state:\n"
+ "{pods}"
+ "However at least one is, so service may not be impacted."
+ ).format(pods="".join(" " + pod['metadata']['name'] + "\n" for pod in not_running))
+
+ return None
+
+ def _get_kibana_url(self):
+ """
+ Get kibana route or report error.
+ Returns: url (or empty), reason for failure
+ """
+
+ # Get logging url
+ get_route = self._exec_oc("get route logging-kibana -o json", [])
+ if not get_route:
+ return None, 'no_route_exists'
+
+ route = json.loads(get_route)
+
+ # check that the route has been accepted by a router
+ ingress = route["status"]["ingress"]
+ # ingress can be null if there is no router, or empty if not routed
+ if not ingress or not ingress[0]:
+ return None, 'route_not_accepted'
+
+ host = route.get("spec", {}).get("host")
+ if not host:
+ return None, 'route_missing_host'
+
+ return 'https://{}/'.format(host), None
+
+ def _check_kibana_route(self):
+ """
+ Check to see if kibana route is up and working.
+ Returns: error string
+ """
+ known_errors = dict(
+ no_route_exists=(
+ 'No route is defined for Kibana in the logging namespace,\n'
+ 'so the logging stack is not accessible. Is logging deployed?\n'
+ 'Did something remove the logging-kibana route?'
+ ),
+ route_not_accepted=(
+ 'The logging-kibana route is not being routed by any router.\n'
+ 'Is the router deployed and working?'
+ ),
+ route_missing_host=(
+ 'The logging-kibana route has no hostname defined,\n'
+ 'which should never happen. Did something alter its definition?'
+ ),
+ )
+
+ kibana_url, error = self._get_kibana_url()
+ if not kibana_url:
+ return known_errors.get(error, error)
+
+ # first, check that kibana is reachable from the master.
+ error = self._verify_url_internal(kibana_url)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'Is kibana running, and is at least one router routing to it?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'because the hostname does not resolve.\n'
+ 'Is DNS configured for the Kibana hostname?'
+ ).format(url=kibana_url)
+ elif 'Status code was not' in error:
+ error = (
+ 'A request from this master to the Kibana URL {url}\n'
+ 'did not return the correct status code (302).\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues. The output was:\n'
+ ' {error}'
+ ).format(url=kibana_url, error=error)
+ return 'Error validating the logging Kibana route:\n' + error
+
+ # in production we would like the kibana route to work from outside the
+ # cluster too; but that may not be the case, so allow disabling just this part.
+ if not self.get_var("openshift_check_efk_kibana_external", default=True):
+ return None
+ error = self._verify_url_external(kibana_url)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from the Ansible control host to Kibana URL {url}\n'
+ 'Is the router for the Kibana hostname exposed externally?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to resolve the Kibana hostname in {url}\n'
+ 'from the Ansible control host.\n'
+ 'Is DNS configured to resolve this Kibana hostname externally?'
+ ).format(url=kibana_url)
+ elif 'Expected success (200)' in error:
+ error = (
+ 'A request to Kibana at {url}\n'
+ 'returned the wrong error code:\n'
+ ' {error}\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues.'
+ ).format(url=kibana_url, error=error)
+ error = (
+ 'Error validating the logging Kibana route:\n{error}\n'
+ 'To disable external Kibana route validation, set in your inventory:\n'
+ ' openshift_check_efk_kibana_external=False'
+ ).format(error=error)
+ return error
+ return None
+
+ def _exec_oc(self, cmd_str, extra_args):
+ return super(Kibana, self).exec_oc(
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ )
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
new file mode 100644
index 000000000..a48e1c728
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -0,0 +1,92 @@
+"""
+Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana stack
+"""
+
+import json
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+class LoggingCheck(OpenShiftCheck):
+ """Base class for OpenShift aggregated logging component checks"""
+
+ name = "logging"
+ logging_namespace = "logging"
+
+ def is_active(self):
+ logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False)
+ return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
+
+ def is_first_master(self):
+ """Determine if running on first master. Returns: bool"""
+ # Note: It would be nice to use membership in oo_first_master group, however for now it
+ # seems best to avoid requiring that setup and just check this is the first master.
+ hostname = self.get_var("ansible_ssh_host") or [None]
+ masters = self.get_var("groups", "masters", default=None) or [None]
+ return masters[0] == hostname
+
+ def run(self):
+ pass
+
+ def get_pods_for_component(self, namespace, logging_component):
+ """Get all pods for a given component. Returns: list of pods for component, error string"""
+ pod_output = self.exec_oc(
+ namespace,
+ "get pods -l component={} -o json".format(logging_component),
+ [],
+ )
+ try:
+ pods = json.loads(pod_output)
+ if not pods or not pods.get('items'):
+ raise ValueError()
+ except ValueError:
+ # successful run but non-parsing data generally means there were no pods in the namespace
+ return None, 'No pods were found for the "{}" logging component.'.format(logging_component)
+
+ return pods['items'], None
+
+ @staticmethod
+ def not_running_pods(pods):
+ """Returns: list of pods not in a ready and running state"""
+ return [
+ pod for pod in pods
+ if not pod.get("status", {}).get("containerStatuses") or any(
+ container['ready'] is False
+ for container in pod['status']['containerStatuses']
+ ) or not any(
+ condition['type'] == 'Ready' and condition['status'] == 'True'
+ for condition in pod['status'].get('conditions', [])
+ )
+ ]
+
+ def exec_oc(self, namespace="logging", cmd_str="", extra_args=None):
+ """
+ Execute an 'oc' command on the remote host.
+ Returns: the command's output,
+ or raises OpenShiftCheckException on error
+ """
+ config_base = self.get_var("openshift", "common", "config_base")
+ args = {
+ "namespace": namespace,
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": cmd_str,
+ "extra_args": list(extra_args) if extra_args else [],
+ }
+
+ result = self.execute_module("ocutil", args)
+ if result.get("failed"):
+ msg = (
+ 'Unexpected error using `oc` to validate the logging stack components.\n'
+ 'Error executing `oc {cmd}`:\n'
+ '{error}'
+ ).format(cmd=args['cmd'], error=result['result'])
+
+ if result['result'] == '[Errno 2] No such file or directory':
+ msg = (
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+ raise OpenShiftCheckException(msg)
+
+ return result.get("result", "")
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
new file mode 100644
index 000000000..b24e88e05
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -0,0 +1,130 @@
+"""
+Check for ensuring logs from pods can be queried in a reasonable amount of time.
+"""
+
+import json
+import time
+
+from uuid import uuid4
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.logging.logging import LoggingCheck
+
+
+ES_CMD_TIMEOUT_SECONDS = 30
+
+
+class LoggingIndexTime(LoggingCheck):
+ """Check that pod logs are aggregated and indexed in ElasticSearch within a reasonable amount of time."""
+ name = "logging_index_time"
+ tags = ["health", "logging"]
+
+ logging_namespace = "logging"
+
+ def run(self):
+ """Add log entry by making unique request to Kibana. Check for unique entry in the ElasticSearch pod logs."""
+ try:
+ log_index_timeout = int(
+ self.get_var("openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS)
+ )
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": ('Invalid value provided for "openshift_check_logging_index_timeout_seconds". '
+ 'Value must be an integer representing an amount in seconds.'),
+ }
+
+ running_component_pods = dict()
+
+ # get all component pods
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default=self.logging_namespace)
+ for component, name in (['kibana', 'Kibana'], ['es', 'Elasticsearch']):
+ pods, error = self.get_pods_for_component(self.logging_namespace, component)
+
+ if error:
+ msg = 'Unable to retrieve pods for the {} logging component: {}'
+ return {"failed": True, "changed": False, "msg": msg.format(name, error)}
+
+ running_pods = self.running_pods(pods)
+
+ if not running_pods:
+ msg = ('No {} pods in the "Running" state were found.'
+ 'At least one pod is required in order to perform this check.')
+ return {"failed": True, "changed": False, "msg": msg.format(name)}
+
+ running_component_pods[component] = running_pods
+
+ uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0])
+ self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout)
+ return {}
+
+ def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs):
+ """Retry an Elasticsearch query every second until query success, or a defined
+ length of time has passed."""
+ deadline = time.time() + timeout_secs
+ interval = 1
+ while not self.query_es_from_es(es_pod, uuid):
+ if time.time() + interval > deadline:
+ msg = "expecting match in Elasticsearch for message with uuid {}, but no matches were found after {}s."
+ raise OpenShiftCheckException(msg.format(uuid, timeout_secs))
+ time.sleep(interval)
+
+ def curl_kibana_with_uuid(self, kibana_pod):
+ """curl Kibana with a unique uuid."""
+ uuid = self.generate_uuid()
+ pod_name = kibana_pod["metadata"]["name"]
+ exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}"
+ exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid)
+
+ error_str = self.exec_oc(self.logging_namespace, exec_cmd, [])
+
+ try:
+ error_code = json.loads(error_str)["statusCode"]
+ except KeyError:
+ msg = ('invalid response returned from Kibana request (Missing "statusCode" key):\n'
+ 'Command: {}\nResponse: {}').format(exec_cmd, error_str)
+ raise OpenShiftCheckException(msg)
+ except ValueError:
+ msg = ('invalid response returned from Kibana request (Non-JSON output):\n'
+ 'Command: {}\nResponse: {}').format(exec_cmd, error_str)
+ raise OpenShiftCheckException(msg)
+
+ if error_code != 404:
+ msg = 'invalid error code returned from Kibana request. Expecting error code "404", but got "{}" instead.'
+ raise OpenShiftCheckException(msg.format(error_code))
+
+ return uuid
+
+ def query_es_from_es(self, es_pod, uuid):
+ """curl the Elasticsearch pod and look for a unique uuid in its logs."""
+ pod_name = es_pod["metadata"]["name"]
+ exec_cmd = (
+ "exec {pod_name} -- curl --max-time 30 -s -f "
+ "--cacert /etc/elasticsearch/secret/admin-ca "
+ "--cert /etc/elasticsearch/secret/admin-cert "
+ "--key /etc/elasticsearch/secret/admin-key "
+ "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
+ )
+ exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace, uuid=uuid)
+ result = self.exec_oc(self.logging_namespace, exec_cmd, [])
+
+ try:
+ count = json.loads(result)["count"]
+ except KeyError:
+ msg = 'invalid response from Elasticsearch query:\n"{}"\nMissing "count" key:\n{}'
+ raise OpenShiftCheckException(msg.format(exec_cmd, result))
+ except ValueError:
+ msg = 'invalid response from Elasticsearch query:\n"{}"\nNon-JSON output:\n{}'
+ raise OpenShiftCheckException(msg.format(exec_cmd, result))
+
+ return count
+
+ @staticmethod
+ def running_pods(pods):
+ """Filter pods that are running."""
+ return [pod for pod in pods if pod['status']['phase'] == 'Running']
+
+ @staticmethod
+ def generate_uuid():
+ """Wrap uuid generator. Allows for testing with expected values."""
+ return str(uuid4())
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
new file mode 100644
index 000000000..765ba072d
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -0,0 +1,50 @@
+"""Check that recommended memory is available."""
+from openshift_checks import OpenShiftCheck
+
+MIB = 2**20
+GIB = 2**30
+
+
+class MemoryAvailability(OpenShiftCheck):
+ """Check that recommended memory is available."""
+
+ name = "memory_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_memory_bytes = {
+ "masters": 16 * GIB,
+ "nodes": 8 * GIB,
+ "etcd": 8 * GIB,
+ }
+ # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
+ memtotal_adjustment = 1 * GIB
+
+ def is_active(self):
+ """Skip hosts that do not have recommended memory requirements."""
+ group_names = self.get_var("group_names", default=[])
+ has_memory_recommendation = bool(set(group_names).intersection(self.recommended_memory_bytes))
+ return super(MemoryAvailability, self).is_active() and has_memory_recommendation
+
+ def run(self):
+ group_names = self.get_var("group_names")
+ total_memory_bytes = self.get_var("ansible_memtotal_mb") * MIB
+
+ recommended_min = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+ configured_min = float(self.get_var("openshift_check_min_host_memory_gb", default=0)) * GIB
+ min_memory_bytes = configured_min or recommended_min
+
+ if total_memory_bytes + self.memtotal_adjustment < min_memory_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available memory ({available:.1f} GiB) is too far '
+ 'below recommended value ({recommended:.1f} GiB)'
+ ).format(
+ available=float(total_memory_bytes) / GIB,
+ recommended=float(min_memory_bytes) / GIB,
+ ),
+ }
+
+ return {}
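
The memory comparison above in compact form, including the 1 GiB memtotal adjustment; the host facts are invented:

    GIB = 2**30
    MIB = 2**20

    def memory_too_low(memtotal_mb, min_bytes, adjustment=1 * GIB):
        # ansible_memtotal_mb under-reports physical RAM, so allow 1 GiB of slack.
        return memtotal_mb * MIB + adjustment < min_bytes

    # Hypothetical master reporting 15564 MiB against a 16 GiB recommendation: passes.
    print(memory_too_low(15564, 16 * GIB))  # -> False
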
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 657e15160..3b2c64e6a 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,18 +1,53 @@
-# pylint: disable=missing-docstring
-from openshift_checks import get_var
+"""
+Mixin classes meant to be used with subclasses of OpenShiftCheck.
+"""
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
+ # permanent # pylint: disable=too-few-public-methods
+ # Reason: The mixin is not intended to stand on its own as a class.
- @classmethod
- def is_active(cls, task_vars):
- return (
- # This mixin is meant to be used with subclasses of OpenShiftCheck.
- super(NotContainerizedMixin, cls).is_active(task_vars) and
- not cls.is_containerized(task_vars)
- )
+ def is_active(self):
+ """Only run on non-containerized hosts."""
+ is_containerized = self.get_var("openshift", "common", "is_containerized")
+ return super(NotContainerizedMixin, self).is_active() and not is_containerized
+
+
+class DockerHostMixin(object):
+ """Mixin for checks that are only active on hosts that require Docker."""
+
+ dependencies = []
- @staticmethod
- def is_containerized(task_vars):
- return get_var(task_vars, "openshift", "common", "is_containerized")
+ def is_active(self):
+ """Only run on hosts that depend on Docker."""
+ is_containerized = self.get_var("openshift", "common", "is_containerized")
+ is_node = "nodes" in self.get_var("group_names", default=[])
+ return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
+
+ def ensure_dependencies(self):
+ """
+ Ensure that docker-related packages exist, but not on atomic hosts
+ (which would not be able to install but should already have them).
+ Returns: msg, failed, changed
+ """
+ if self.get_var("openshift", "common", "is_atomic"):
+ return "", False, False
+
+ # NOTE: we would use the "package" module but it's actually an action plugin
+ # and it's not clear how to invoke one of those. This is about the same anyway:
+ result = self.execute_module(
+ self.get_var("ansible_pkg_mgr", default="yum"),
+ {"name": self.dependencies, "state": "present"},
+ )
+ msg = result.get("msg", "")
+ if result.get("failed"):
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
+ msg = (
+ "Unable to install required packages on this host:\n"
+ " {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg)
+ failed = result.get("failed", False) or result.get("rc", 0) != 0
+ changed = result.get("changed", False)
+ return msg, failed, changed
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
new file mode 100644
index 000000000..cd6ebd493
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -0,0 +1,77 @@
+"""
+Health check for determining whether the installed version of Open vSwitch is incompatible with the
+currently installed version of OpenShift.
+"""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
+ """Check that packages in a package_list are installed on the host
+ and are the correct version as determined by an OpenShift installation.
+ """
+
+ name = "ovs_version"
+ tags = ["health"]
+
+ openshift_to_ovs_version = {
+ "3.6": "2.6",
+ "3.5": "2.6",
+ "3.4": "2.4",
+ }
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
+ def is_active(self):
+ """Skip hosts that do not have package requirements."""
+ group_names = self.get_var("group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(OvsVersion, self).is_active() and master_or_node
+
+ def run(self):
+ args = {
+ "package_list": [
+ {
+ "name": "openvswitch",
+ "version": self.get_required_ovs_version(),
+ },
+ ],
+ }
+ return self.execute_module("rpm_version", args)
+
+ def get_required_ovs_version(self):
+ """Return the correct Open vSwitch version for the current OpenShift version"""
+ openshift_version = self._get_openshift_version()
+
+ if float(openshift_version) < 3.5:
+ return self.openshift_to_ovs_version["3.4"]
+
+ ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ if ovs_version:
+ return ovs_version
+
+ msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def _get_openshift_version(self):
+ openshift_version = self.get_var("openshift_image_tag")
+ if openshift_version and openshift_version[0] == 'v':
+ openshift_version = openshift_version[1:]
+
+ return self._parse_version(openshift_version)
+
+ def _parse_version(self, version):
+ components = version.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(version))
+
+ if components[0] in self.openshift_major_release_version:
+ components[0] = self.openshift_major_release_version[components[0]]
+
+ return '.'.join(components[:2])
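
A small sketch of the version normalization performed by _parse_version together with the origin-to-enterprise major-release mapping; the image tags shown are examples:

    OPENSHIFT_MAJOR_RELEASE_MAP = {"1": "3"}  # origin 1.x tracks enterprise 3.x

    def normalize_openshift_version(image_tag):
        version = image_tag.lstrip('v')
        components = version.split('.')
        if len(components) < 2:
            raise ValueError("invalid OpenShift version: {}".format(image_tag))
        components[0] = OPENSHIFT_MAJOR_RELEASE_MAP.get(components[0], components[0])
        return '.'.join(components[:2])

    print(normalize_openshift_version("v1.5.2"))    # -> 3.5
    print(normalize_openshift_version("v3.6.173"))  # -> 3.6
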
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 9891972a6..a86180b00 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -1,5 +1,6 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+"""Check that required RPM packages are available."""
+
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -9,9 +10,13 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
name = "package_availability"
tags = ["preflight"]
- def run(self, tmp, task_vars):
- rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
- group_names = get_var(task_vars, "group_names", default=[])
+ def is_active(self):
+ """Run only when yum is the package manager as the code is specific to it."""
+ return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
+
+ def run(self):
+ rpm_prefix = self.get_var("openshift", "common", "service_type")
+ group_names = self.get_var("group_names", default=[])
packages = set()
@@ -21,10 +26,11 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
+ """Return a list of RPMs that we expect a master install to have available."""
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-clients".format(rpm_prefix=rpm_prefix),
@@ -32,8 +38,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
"bash-completion",
"cockpit-bridge",
"cockpit-docker",
- "cockpit-kubernetes",
- "cockpit-shell",
+ "cockpit-system",
"cockpit-ws",
"etcd",
"httpd-tools",
@@ -41,6 +46,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
@staticmethod
def node_packages(rpm_prefix):
+ """Return a list of RPMs that we expect a node install to have available."""
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-node".format(rpm_prefix=rpm_prefix),
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index fd0c0a755..1e9aecbe0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -1,14 +1,14 @@
-# pylint: disable=missing-docstring
+"""Check that a yum update would not run into conflicts with available packages."""
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
- """Check that there are no conflicts in RPM packages."""
+ """Check that a yum update would not run into conflicts with available packages."""
name = "package_update"
tags = ["preflight"]
- def run(self, tmp, task_vars):
+ def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 42193a1c6..020786804 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,5 +1,5 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+"""Check that available RPM packages match the required versions."""
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks.mixins import NotContainerizedMixin
@@ -9,12 +9,118 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
name = "package_version"
tags = ["preflight"]
- def run(self, tmp, task_vars):
- rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
- openshift_release = get_var(task_vars, "openshift_release")
+ openshift_to_ovs_version = {
+ "3.6": ["2.6", "2.7"],
+ "3.5": ["2.6", "2.7"],
+ "3.4": "2.4",
+ }
+
+ openshift_to_docker_version = {
+ "3.1": "1.8",
+ "3.2": "1.10",
+ "3.3": "1.10",
+ "3.4": "1.12",
+ }
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
+ def is_active(self):
+ """Skip hosts that do not have package requirements."""
+ group_names = self.get_var("group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(PackageVersion, self).is_active() and master_or_node
+
+ def run(self):
+ rpm_prefix = self.get_var("openshift", "common", "service_type")
+ openshift_release = self.get_var("openshift_release", default='')
+ deployment_type = self.get_var("openshift_deployment_type")
+ check_multi_minor_release = deployment_type in ['openshift-enterprise']
args = {
- "prefix": rpm_prefix,
- "version": openshift_release,
+ "package_list": [
+ {
+ "name": "openvswitch",
+ "version": self.get_required_ovs_version(),
+ "check_multi": False,
+ },
+ {
+ "name": "docker",
+ "version": self.get_required_docker_version(),
+ "check_multi": False,
+ },
+ {
+ "name": "{}".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ {
+ "name": "{}-master".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ {
+ "name": "{}-node".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ ],
}
- return self.execute_module("aos_version", args, tmp, task_vars)
+
+ return self.execute_module("aos_version", args)
+
+ def get_required_ovs_version(self):
+ """Return the correct Open vSwitch version for the current OpenShift version.
+ If the current OpenShift version is 3.5 or newer, require Open vSwitch 2.6 or 2.7;
+ otherwise require Open vSwitch 2.4."""
+ openshift_version = self.get_openshift_version()
+
+ if float(openshift_version) < 3.5:
+ return self.openshift_to_ovs_version["3.4"]
+
+ ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ if ovs_version:
+ return ovs_version
+
+ msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def get_required_docker_version(self):
+ """Return the correct Docker version for the current OpenShift version.
+ If the OpenShift version is 3.1, ensure Docker version 1.8.
+ If the OpenShift version is 3.2 or 3.3, ensure Docker version 1.10.
+ If the current OpenShift version is >= 3.4, ensure Docker version 1.12."""
+ openshift_version = self.get_openshift_version()
+
+ if float(openshift_version) >= 3.4:
+ return self.openshift_to_docker_version["3.4"]
+
+ docker_version = self.openshift_to_docker_version.get(str(openshift_version))
+ if docker_version:
+ return docker_version
+
+ msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def get_openshift_version(self):
+ """Return received image tag as a normalized X.Y minor version string."""
+ openshift_version = self.get_var("openshift_image_tag")
+ if openshift_version and openshift_version[0] == 'v':
+ openshift_version = openshift_version[1:]
+
+ return self.parse_version(openshift_version)
+
+ def parse_version(self, version):
+ """Return a normalized X.Y minor version string."""
+ components = version.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(version))
+
+ if components[0] in self.openshift_major_release_version:
+ components[0] = self.openshift_major_release_version[components[0]]
+
+ return '.'.join(components[:2])
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
new file mode 100644
index 000000000..2d068be3d
--- /dev/null
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -0,0 +1,252 @@
+import pytest
+
+from ansible.playbook.play_context import PlayContext
+
+from openshift_health_check import ActionModule, resolve_checks
+from openshift_checks import OpenShiftCheckException
+
+
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None):
+ """Returns a new class that is compatible with OpenShiftCheck for testing."""
+
+ _name, _tags = name, tags
+
+ class FakeCheck(object):
+ name = _name
+ tags = _tags or []
+
+ def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ pass
+
+ def is_active(self):
+ return is_active
+
+ def run(self):
+ if run_exception is not None:
+ raise run_exception
+ return run_return
+
+ return FakeCheck
+
+
+# Fixtures
+
+
+@pytest.fixture
+def plugin():
+ task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ return plugin
+
+
+class FakeTask(object):
+ def __init__(self, action, args):
+ self.action = action
+ self.args = args
+ self.async = 0
+
+
+@pytest.fixture
+def task_vars():
+ return dict(openshift=dict(), ansible_host='unit-test-host')
+
+
+# Assertion helpers
+
+
+def failed(result, msg_has=None):
+ if msg_has is not None:
+ assert 'msg' in result
+ for term in msg_has:
+ assert term.lower() in result['msg'].lower()
+ return result.get('failed', False)
+
+
+def changed(result):
+ return result.get('changed', False)
+
+
+# tests whether task is skipped, not individual checks
+def skipped(result):
+ return result.get('skipped', False)
+
+
+# Tests
+
+
+@pytest.mark.parametrize('task_vars', [
+ None,
+ {},
+])
+def test_action_plugin_missing_openshift_facts(plugin, task_vars):
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['openshift_facts'])
+
+
+def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, monkeypatch):
+ FakeCheck1 = fake_check('duplicate_name')
+ FakeCheck2 = fake_check('duplicate_name')
+ checks = [FakeCheck1, FakeCheck2]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck'])
+
+
+def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=False)]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+ checks = [fake_check('fake_check', is_active=True)]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ task_vars['openshift_disable_check'] = 'fake_check'
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
+ check_return_value = {'ok': 'test'}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
+ check_return_value = {'ok': 'test', 'changed': True}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert not failed(result)
+ assert changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
+ check_return_value = {'failed': True}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert failed(result, msg_has=['failed'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+ exception_msg = 'fake check has an exception'
+ run_exception = OpenShiftCheckException(exception_msg)
+ check_class = fake_check(run_exception=run_exception)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert failed(result, msg_has=['failed'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['unknown', 'name'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+@pytest.mark.parametrize('names,all_checks,expected', [
+ ([], [], set()),
+ (
+ ['a', 'b'],
+ [
+ fake_check('a'),
+ fake_check('b'),
+ ],
+ set(['a', 'b']),
+ ),
+ (
+ ['a', 'b', '@group'],
+ [
+ fake_check('from_group_1', ['group', 'another_group']),
+ fake_check('not_in_group', ['another_group']),
+ fake_check('from_group_2', ['preflight', 'group']),
+ fake_check('a'),
+ fake_check('b'),
+ ],
+ set(['a', 'b', 'from_group_1', 'from_group_2']),
+ ),
+])
+def test_resolve_checks_ok(names, all_checks, expected):
+ assert resolve_checks(names, all_checks) == expected
+
+
+@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [
+ (
+ ['testA', 'testB'],
+ [],
+ ['check', 'name', 'testA', 'testB'],
+ ['tag', 'group', '@'],
+ ),
+ (
+ ['@group'],
+ [],
+ ['tag', 'name', 'group'],
+ ['check', '@'],
+ ),
+ (
+ ['testA', 'testB', '@group'],
+ [],
+ ['check', 'name', 'testA', 'testB', 'tag', 'group'],
+ ['@'],
+ ),
+ (
+ ['testA', 'testB', '@group'],
+ [
+ fake_check('from_group_1', ['group', 'another_group']),
+ fake_check('not_in_group', ['another_group']),
+ fake_check('from_group_2', ['preflight', 'group']),
+ ],
+ ['check', 'name', 'testA', 'testB'],
+ ['tag', 'group', '@'],
+ ),
+])
+def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception):
+ with pytest.raises(Exception) as excinfo:
+ resolve_checks(names, all_checks)
+ for word in words_in_exception:
+ assert word in str(excinfo.value)
+ for word in words_not_in_exception:
+ assert word not in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
new file mode 100644
index 000000000..4100f6c70
--- /dev/null
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -0,0 +1,196 @@
+import pytest
+import aos_version
+
+from collections import namedtuple
+Package = namedtuple('Package', ['name', 'version'])
+
+expected_pkgs = {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+}
+
+
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
+ (
+ # all found
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
+ expected_pkgs,
+ ),
+ (
+ # found with more specific version
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
+ expected_pkgs,
+ ),
+ (
+ [Package('ovs', '2.6'), Package('ovs', '2.4')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+ (
+ [Package('ovs', '2.7')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+])
+def test_check_precise_version_found(pkgs, expected_pkgs_dict):
+ aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
+
+
+@pytest.mark.parametrize('pkgs,expect_not_found', [
+ (
+ [],
+ {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # none found
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ {
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # completely missing
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ {
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # not the right version
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
+ {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # eggs found with multiple versions
+ ),
+])
+def test_check_precise_version_found_fail(pkgs, expect_not_found):
+ with pytest.raises(aos_version.PreciseVersionNotFound) as e:
+ aos_version._check_precise_version_found(pkgs, expected_pkgs)
+ assert list(expect_not_found.values()) == e.value.problem_pkgs
+
+
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
+ (
+ [],
+ expected_pkgs,
+ ),
+ (
+ # more precise but not strictly higher
+ [Package('spam', '3.2.1.9')],
+ expected_pkgs,
+ ),
+ (
+ [Package('ovs', '2.7')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+])
+def test_check_higher_version_found(pkgs, expected_pkgs_dict):
+ aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
+
+
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
+ (
+ [Package('spam', '3.3')],
+ expected_pkgs,
+ ['spam-3.3'], # lower precision, but higher
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ expected_pkgs,
+ ['eggs-3.3.2'], # one too high
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ expected_pkgs,
+ ['eggs-3.4'], # multiple versions, one is higher
+ ),
+ (
+ [Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
+ expected_pkgs,
+ ['eggs-3.4'], # multiple versions, two are higher
+ ),
+ (
+ [Package('ovs', '2.8')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ['ovs-2.8'],
+ ),
+])
+def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
+ with pytest.raises(aos_version.FoundHigherVersion) as e:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
+ assert set(expect_higher) == set(e.value.problem_pkgs)
+
+
+@pytest.mark.parametrize('pkgs', [
+ [],
+ [Package('spam', '3.2.1')],
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
+])
+def test_check_multi_minor_release(pkgs):
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+
+
+@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
+ (
+ [Package('spam', '3.2.1'), Package('spam', '3.3.2')],
+ ['spam'],
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ ['eggs'],
+ ),
+])
+def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
+ with pytest.raises(aos_version.FoundMultiRelease) as e:
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+ assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py
index bf717ae85..3cbd65507 100644
--- a/roles/openshift_health_checker/test/conftest.py
+++ b/roles/openshift_health_checker/test/conftest.py
@@ -1,5 +1,11 @@
import os
import sys
-# extend sys.path so that tests can import openshift_checks
-sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
+# extend sys.path so that tests can import openshift_checks and action plugins
+# from this role.
+openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))
+sys.path[1:1] = [
+ openshift_health_checker_path,
+ os.path.join(openshift_health_checker_path, 'action_plugins'),
+ os.path.join(openshift_health_checker_path, 'library'),
+]
diff --git a/roles/openshift_health_checker/test/curator_test.py b/roles/openshift_health_checker/test/curator_test.py
new file mode 100644
index 000000000..ae108c96e
--- /dev/null
+++ b/roles/openshift_health_checker/test/curator_test.py
@@ -0,0 +1,68 @@
+import pytest
+
+from openshift_checks.logging.curator import Curator
+
+
+def canned_curator(exec_oc=None):
+ """Create a Curator check object with canned exec_oc method"""
+ check = Curator("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
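+ """Assert that error contains expect_error when an error is expected, and that error is empty otherwise."""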
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+not_running_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "no Curator pods",
+ ),
+ (
+ [plain_curator_pod],
+ None,
+ ),
+ (
+ [not_running_curator_pod],
+ "not currently in a running state",
+ ),
+ (
+ [plain_curator_pod, plain_curator_pod],
+ "more than one Curator pod",
+ ),
+])
+def test_get_curator_pods(pods, expect_error):
+ check = canned_curator()
+ error = check.check_curator(pods)
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
new file mode 100644
index 000000000..e98d02c58
--- /dev/null
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -0,0 +1,177 @@
+import pytest
+
+from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
+])
+def test_is_active(group_names, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ )
+ assert DiskAvailability(None, task_vars).is_active() == is_active
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+ ([{'mount': '/var'}], ['/var']), # missing size_available
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=['masters'],
+ ansible_mounts=ansible_mounts,
+ )
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ DiskAvailability(fake_execute_module, task_vars).run()
+
+ for word in 'determine disk availability'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
+ (
+ ['masters'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['nodes'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ 1, # configure lower threshold
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9 + 1, # way smaller than recommended
+ }],
+ ),
+ (
+ ['etcd'],
+ 0,
+ [{
+ # not enough space on / ...
+ 'mount': '/',
+ 'size_available': 2 * 10**9,
+ }, {
+ # ... but enough on /var
+ 'mount': '/var',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+])
+def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
+ task_vars = dict(
+ group_names=group_names,
+ openshift_check_min_host_disk_gb=configured_min,
+ ansible_mounts=ansible_mounts,
+ )
+
+ result = DiskAvailability(fake_execute_module, task_vars).run()
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,configured_min,ansible_mounts,extra_words', [
+ (
+ ['masters'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['masters'],
+ 100, # set a higher threshold
+ [{
+ 'mount': '/',
+ 'size_available': 50 * 10**9, # would normally be enough...
+ }],
+ ['100.0 GB'],
+ ),
+ (
+ ['nodes'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9,
+ }],
+ ['1.0 GB'],
+ ),
+ (
+ ['etcd'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ 0,
+ [{
+ 'mount': '/',
+ # enough space for a node, not enough for a master
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ['15.0 GB'],
+ ),
+ (
+ ['etcd'],
+ 0,
+ [{
+ # enough space on / ...
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+ # ... but not enough on /var
+ 'mount': '/var',
+ 'size_available': 0,
+ }],
+ ['0.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_disk_space(group_names, configured_min, ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ openshift_check_min_host_disk_gb=configured_min,
+ ansible_mounts=ansible_mounts,
+ )
+
+ result = DiskAvailability(fake_execute_module, task_vars).run()
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 2a9c32f77..8d0a53df9 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,26 +3,272 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.xfail(strict=True) # TODO: remove this once this test is fully implemented.
-@pytest.mark.parametrize('task_vars,expected_result', [
+@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+ ("origin", True, [], True),
+ ("openshift-enterprise", True, [], True),
+ ("enterprise", True, [], False),
+ ("online", True, [], False),
+ ("invalid", True, [], False),
+ ("", True, [], False),
+ ("origin", False, [], False),
+ ("openshift-enterprise", False, [], False),
+ ("origin", False, ["nodes", "masters"], True),
+ ("openshift-enterprise", False, ["etcd"], False),
+])
+def test_is_active(deployment_type, is_containerized, group_names, expect_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_deployment_type=deployment_type,
+ group_names=group_names,
+ )
+ assert DockerImageAvailability(None, task_vars).is_active() == expect_active
+
+
+@pytest.mark.parametrize("is_containerized,is_atomic", [
+ (True, True),
+ (False, False),
+ (True, False),
+ (False, True),
+])
+def test_all_images_available_locally(is_containerized, is_atomic):
+ def execute_module(module_name, module_args, *_):
+ if module_name == "yum":
+ return {"changed": True}
+
+ assert module_name == "docker_image_facts"
+ assert 'name' in module_args
+ assert module_args['name']
+ return {
+ 'images': [module_args['name']],
+ }
+
+ result = DockerImageAvailability(execute_module, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=is_containerized,
+ is_atomic=is_atomic,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='3.4',
+ group_names=['nodes', 'masters'],
+ )).run()
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize("available_locally", [
+ False,
+ True,
+])
+def test_all_images_available_remotely(available_locally):
+ def execute_module(module_name, *_):
+ if module_name == 'docker_image_facts':
+ return {'images': [], 'failed': available_locally}
+ return {'changed': False}
+
+ result = DockerImageAvailability(execute_module, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='v3.4',
+ group_names=['nodes', 'masters'],
+ )).run()
+
+ assert not result.get('failed', False)
+
+
+def test_all_images_unavailable():
+ def execute_module(module_name=None, *_):
+ if module_name == "command":
+ return {
+ 'failed': True,
+ }
+
+ return {
+ 'changed': False,
+ }
+
+ actual = DockerImageAvailability(execute_module, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_image_tag='latest',
+ group_names=['nodes', 'masters'],
+ )).run()
+
+ assert actual['failed']
+ assert "required Docker images are not available" in actual['msg']
+
+
+@pytest.mark.parametrize("message,extra_words", [
(
- dict(
- openshift=dict(common=dict(
+ "docker image update failure",
+ ["docker image update failure"],
+ ),
+ (
+ "No package matching 'skopeo' found available, installed or updated",
+ ["dependencies can be installed via `yum`"]
+ ),
+])
+def test_skopeo_update_failure(message, extra_words):
+ def execute_module(module_name=None, *_):
+ if module_name == "yum":
+ return {
+ "failed": True,
+ "msg": message,
+ "changed": False,
+ }
+
+ return {'changed': False}
+
+ actual = DockerImageAvailability(execute_module, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["unknown.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )).run()
+
+ assert actual["failed"]
+ for word in extra_words:
+ assert word in actual["msg"]
+
+
+@pytest.mark.parametrize("deployment_type,registries", [
+ ("origin", ["unknown.io"]),
+ ("openshift-enterprise", ["registry.access.redhat.com"]),
+ ("openshift-enterprise", []),
+])
+def test_registry_availability(deployment_type, registries):
+ def execute_module(module_name=None, *_):
+ return {
+ 'changed': False,
+ }
+
+ actual = DockerImageAvailability(execute_module, task_vars=dict(
+ openshift=dict(
+ common=dict(
service_type='origin',
is_containerized=False,
- )),
- openshift_release='v3.5',
- deployment_type='origin',
- openshift_image_tag='', # FIXME: should not be required
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=registries),
),
- {'changed': False},
+ openshift_deployment_type=deployment_type,
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )).run()
+
+ assert not actual.get("failed", False)
+
+
+@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+ ( # standard set of images required on nodes
+ "origin", False, ['nodes'], None,
+ set([
+ 'openshift/origin-pod:vtest',
+ 'openshift/origin-deployer:vtest',
+ 'openshift/origin-docker-registry:vtest',
+ 'openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes', # origin version of registry-console
+ ])
),
- # TODO: add more parameters here to test the multiple possible inputs that affect behavior.
+ ( # set a different URL for images
+ "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+ set([
+ 'foo.io/openshift/origin-pod:vtest',
+ 'foo.io/openshift/origin-deployer:vtest',
+ 'foo.io/openshift/origin-docker-registry:vtest',
+ 'foo.io/openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes', # AFAICS this is not built from the URL
+ ])
+ ),
+ (
+ "origin", True, ['nodes', 'masters', 'etcd'], None,
+ set([
+ # images running on top of openshift
+ 'openshift/origin-pod:vtest',
+ 'openshift/origin-deployer:vtest',
+ 'openshift/origin-docker-registry:vtest',
+ 'openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes',
+ # containerized component images
+ 'openshift/origin:vtest',
+ 'openshift/node:vtest',
+ 'openshift/openvswitch:vtest',
+ 'registry.access.redhat.com/rhel7/etcd',
+ ])
+ ),
+ ( # enterprise images
+ "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ set([
+ 'foo.io/openshift3/ose-pod:f13ac45',
+ 'foo.io/openshift3/ose-deployer:f13ac45',
+ 'foo.io/openshift3/ose-docker-registry:f13ac45',
+ 'foo.io/openshift3/ose-haproxy-router:f13ac45',
+ # registry-console is not constructed/versioned the same as the others.
+ 'registry.access.redhat.com/openshift3/registry-console',
+ # containerized images aren't built from oreg_url
+ 'openshift3/node:vtest',
+ 'openshift3/openvswitch:vtest',
+ ])
+ ),
+ (
+ "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ set([
+ 'registry.access.redhat.com/rhel7/etcd',
+ # lb does not yet come in a containerized version
+ ])
+ ),
+
])
-def test_docker_image_availability(task_vars, expected_result):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- return {'info': {}} # TODO: this will vary depending on input parameters.
+def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+ task_vars = dict(
+ openshift=dict(
+ common=dict(
+ is_containerized=is_containerized,
+ is_atomic=False,
+ ),
+ ),
+ openshift_deployment_type=deployment_type,
+ group_names=groups,
+ oreg_url=oreg_url,
+ openshift_image_tag='vtest',
+ )
- check = DockerImageAvailability(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result == expected_result
+ assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+
+
+def test_containerized_etcd():
+ task_vars = dict(
+ openshift=dict(
+ common=dict(
+ is_containerized=True,
+ ),
+ ),
+ openshift_deployment_type="origin",
+ group_names=['etcd'],
+ )
+ expected = set(['registry.access.redhat.com/rhel7/etcd'])
+ assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
new file mode 100644
index 000000000..e0dccc062
--- /dev/null
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -0,0 +1,305 @@
+import pytest
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.docker_storage import DockerStorage
+
+
+@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+ (False, ["masters", "etcd"], False),
+ (False, ["masters", "nodes"], True),
+ (True, ["etcd"], True),
+])
+def test_is_active(is_containerized, group_names, is_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ group_names=group_names,
+ )
+ assert DockerStorage(None, task_vars).is_active() == is_active
+
+
+def non_atomic_task_vars():
+ return {"openshift": {"common": {"is_atomic": False}}}
+
+
+@pytest.mark.parametrize('docker_info, failed, expect_msg', [
+ (
+ dict(failed=True, msg="Error connecting: Error while fetching server API version"),
+ True,
+ ["Is docker running on this host?"],
+ ),
+ (
+ dict(msg="I have no info"),
+ True,
+ ["missing info"],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Pool Name", "docker-docker--pool")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Data loop file", "true")],
+ }),
+ True,
+ ["loopback devices with the Docker devicemapper storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": [("Backing Filesystem", "xfs")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ "DriverStatus": [("Backing Filesystem", "btrfs")],
+ }),
+ True,
+ ["storage is type 'btrfs'", "only supported with\n'xfs'"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": [("Backing Filesystem", "xfs")],
+ "OperatingSystem": "Red Hat Enterprise Linux Server release 7.2 (Maipo)",
+ "KernelVersion": "3.10.0-327.22.2.el7.x86_64",
+ }),
+ True,
+ ["Docker reports kernel version 3.10.0-327"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ "DriverStatus": [("Backing Filesystem", "xfs")],
+ "OperatingSystem": "CentOS",
+ "KernelVersion": "3.10.0-514",
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "unsupported",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+])
+def test_check_storage_driver(docker_info, failed, expect_msg):
+ def execute_module(module_name, *_):
+ if module_name == "yum":
+ return {}
+ if module_name != "docker_info":
+ raise ValueError("not expecting module " + module_name)
+ return docker_info
+
+ check = DockerStorage(execute_module, non_atomic_task_vars())
+ check.check_dm_usage = lambda status: dict() # stub out for this test
+ check.check_overlay_usage = lambda info: dict() # stub out for this test
+ result = check.run()
+
+ if failed:
+ assert result["failed"]
+ else:
+ assert not result.get("failed", False)
+
+ for word in expect_msg:
+ assert word in result["msg"]
+
+
+enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "19.92 MB",
+ "Data Space Total": "8.535 GB",
+ "Metadata Space Used": "40.96 kB",
+ "Metadata Space Total": "25.17 MB",
+}
+
+not_enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "10 GB",
+ "Data Space Total": "10 GB",
+ "Metadata Space Used": "42 kB",
+ "Metadata Space Total": "43 kB",
+}
+
+
+@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
+ (
+ {"max_thinpool_data_usage_percent": "not a float"},
+ enough_space,
+ "12g",
+ False,
+ ["is not a percentage"],
+ ),
+ (
+ {},
+ {}, # empty values from driver status
+ "bogus", # also does not parse as bytes
+ False,
+ ["Could not interpret", "as bytes"],
+ ),
+ (
+ {},
+ enough_space,
+ "12.00g",
+ True,
+ [],
+ ),
+ (
+ {},
+ not_enough_space,
+ "0.00",
+ False,
+ ["data usage", "metadata usage", "higher than threshold"],
+ ),
+])
+def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
+ check = DockerStorage(None, task_vars)
+ check.get_vg_free = lambda pool: vg_free
+ result = check.check_dm_usage(driver_status)
+ result_success = not result.get("failed")
+
+ assert result_success is success
+ for msg in expect_msg:
+ assert msg in result["msg"]
+
+
+@pytest.mark.parametrize('pool, command_returns, raises, returns', [
+ (
+ "foo-bar",
+ { # vgs missing
+ "msg": "[Errno 2] No such file or directory",
+ "failed": True,
+ "cmd": "/sbin/vgs",
+ "rc": 2,
+ },
+ "Failed to run /sbin/vgs",
+ None,
+ ),
+ (
+ "foo", # no hyphen in name - should not happen
+ {},
+ "name does not have the expected format",
+ None,
+ ),
+ (
+ "foo-bar",
+ dict(stdout=" 4.00g\n"),
+ None,
+ "4.00g",
+ ),
+ (
+ "foo-bar",
+ dict(stdout="\n"), # no matching VG
+ "vgs did not find this VG",
+ None,
+ )
+])
+def test_vg_free(pool, command_returns, raises, returns):
+ def execute_module(module_name, *_):
+ if module_name != "command":
+ raise ValueError("not expecting module " + module_name)
+ return command_returns
+
+ check = DockerStorage(execute_module)
+ if raises:
+ with pytest.raises(OpenShiftCheckException) as err:
+ check.get_vg_free(pool)
+ assert raises in str(err.value)
+ else:
+ ret = check.get_vg_free(pool)
+ assert ret == returns
+
+
+@pytest.mark.parametrize('string, expect_bytes', [
+ ("12", 12.0),
+ ("12 k", 12.0 * 1024),
+ ("42.42 MB", 42.42 * 1024**2),
+ ("12g", 12.0 * 1024**3),
+])
+def test_convert_to_bytes(string, expect_bytes):
+ got = DockerStorage.convert_to_bytes(string)
+ assert got == expect_bytes
+
+
+@pytest.mark.parametrize('string', [
+ "bork",
+ "42 Qs",
+])
+def test_convert_to_bytes_error(string):
+ with pytest.raises(ValueError) as err:
+ DockerStorage.convert_to_bytes(string)
+ assert "Cannot convert" in str(err.value)
+ assert string in str(err.value)
+
+
+ansible_mounts_enough = [{
+ 'mount': '/var/lib/docker',
+ 'size_available': 50 * 10**9,
+ 'size_total': 50 * 10**9,
+}]
+ansible_mounts_not_enough = [{
+ 'mount': '/var/lib/docker',
+ 'size_available': 0,
+ 'size_total': 50 * 10**9,
+}]
+ansible_mounts_missing_fields = [dict(mount='/var/lib/docker')]
+ansible_mounts_zero_size = [{
+ 'mount': '/var/lib/docker',
+ 'size_available': 0,
+ 'size_total': 0,
+}]
+
+
+@pytest.mark.parametrize('ansible_mounts, threshold, expect_fail, expect_msg', [
+ (
+ ansible_mounts_enough,
+ None,
+ False,
+ [],
+ ),
+ (
+ ansible_mounts_not_enough,
+ None,
+ True,
+ ["usage percentage", "higher than threshold"],
+ ),
+ (
+ ansible_mounts_not_enough,
+ "bogus percent",
+ True,
+ ["is not a percentage"],
+ ),
+ (
+ ansible_mounts_missing_fields,
+ None,
+ True,
+ ["Ansible bug"],
+ ),
+ (
+ ansible_mounts_zero_size,
+ None,
+ True,
+ ["Ansible bug"],
+ ),
+])
+def test_overlay_usage(ansible_mounts, threshold, expect_fail, expect_msg):
+ task_vars = non_atomic_task_vars()
+ task_vars["ansible_mounts"] = ansible_mounts
+ if threshold is not None:
+ task_vars["max_overlay_usage_percent"] = threshold
+ check = DockerStorage(None, task_vars)
+ docker_info = dict(DockerRootDir="/var/lib/docker", Driver="overlay")
+ result = check.check_overlay_usage(docker_info)
+
+ assert expect_fail == bool(result.get("failed"))
+ for msg in expect_msg:
+ assert msg in result["msg"]
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
new file mode 100644
index 000000000..9edfc17c7
--- /dev/null
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -0,0 +1,180 @@
+import pytest
+import json
+
+from openshift_checks.logging.elasticsearch import Elasticsearch
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+def canned_elasticsearch(task_vars=None, exec_oc=None):
+ """Create an Elasticsearch check object with canned exec_oc method"""
+ check = Elasticsearch("dummy", task_vars or {}) # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+split_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-2"},
+ "name": "logging-es-2",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es-2",
+}
+
+
+def test_check_elasticsearch():
+ assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([])
+
+ # canned oc responses to match so all the checks pass
+ def _exec_oc(cmd, args):
+ if '_cat/master' in cmd:
+ return 'name logging-es'
+ elif '/_nodes' in cmd:
+ return json.dumps(es_node_list)
+ elif '_cluster/health' in cmd:
+ return '{"status": "green"}'
+ elif ' df ' in cmd:
+ return 'IUse% Use%\n 3% 4%\n'
+ else:
+ raise Exception(cmd)
+
+ assert not canned_elasticsearch({}, _exec_oc).check_elasticsearch([plain_es_pod])
+
+
+def pods_by_name(pods):
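+ """Index a list of pod dicts by their metadata name."""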
+ return {pod['metadata']['name']: pod for pod in pods}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ None,
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ 'Found multiple Elasticsearch masters',
+ ),
+])
+def test_check_elasticsearch_masters(pods, expect_error):
+ test_pods = list(pods)
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: test_pods.pop(0)['_test_master_name_str'])
+
+ errors = check._check_elasticsearch_masters(pods_by_name(pods))
+ assert_error(''.join(errors), expect_error)
+
+
+es_node_list = {
+ 'nodes': {
+ 'random-es-name': {
+ 'host': 'logging-es',
+ }}}
+
+
+@pytest.mark.parametrize('pods, node_list, expect_error', [
+ (
+ [],
+ {},
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ es_node_list,
+ None,
+ ),
+ (
+ [plain_es_pod],
+ {}, # empty list of nodes triggers KeyError
+ "Failed to query",
+ ),
+ (
+ [split_es_pod],
+ es_node_list,
+ 'does not correspond to any known ES pod',
+ ),
+])
+def test_check_elasticsearch_node_list(pods, node_list, expect_error):
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+
+ errors = check._check_elasticsearch_node_list(pods_by_name(pods))
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('pods, health_data, expect_error', [
+ (
+ [plain_es_pod],
+ [{"status": "green"}],
+ None,
+ ),
+ (
+ [plain_es_pod],
+ [{"no-status": "should bomb"}],
+ 'Could not retrieve cluster health status',
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ [{"status": "green"}, {"status": "red"}],
+ 'Elasticsearch cluster health status is RED',
+ ),
+])
+def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
+ test_health_data = list(health_data)
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(test_health_data.pop(0)))
+
+ errors = check._check_es_cluster_health(pods_by_name(pods))
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('disk_data, expect_error', [
+ (
+ 'df: /elasticsearch/persistent: No such file or directory\n',
+ 'Could not retrieve storage usage',
+ ),
+ (
+ 'IUse% Use%\n 3% 4%\n',
+ None,
+ ),
+ (
+ 'IUse% Use%\n 95% 40%\n',
+ 'Inode percent usage on the storage volume',
+ ),
+ (
+ 'IUse% Use%\n 3% 94%\n',
+ 'Disk percent usage on the storage volume',
+ ),
+])
+def test_check_elasticsearch_diskspace(disk_data, expect_error):
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: disk_data)
+
+ errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
+ assert_error(''.join(errors), expect_error)
diff --git a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
new file mode 100644
index 000000000..e3d6706fa
--- /dev/null
+++ b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
@@ -0,0 +1,328 @@
+import pytest
+
+from collections import namedtuple
+from openshift_checks.etcd_imagedata_size import EtcdImageDataSize, OpenShiftCheckException
+from etcdkeysize import check_etcd_key_size
+
+
+def fake_etcd_client(root):
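+ """Build a fake etcd client namedtuple whose read(key, recursive) returns canned results from the fake node tree."""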
+ fake_nodes = dict()
+ fake_etcd_node(root, fake_nodes)
+
+ clientclass = namedtuple("client", ["read"])
+ return clientclass(lambda key, recursive: fake_etcd_result(fake_nodes[key]))
+
+
+def fake_etcd_result(fake_node):
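+ """Wrap a fake node in a result namedtuple exposing .leaves, as the etcd client's read() would."""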
+ resultclass = namedtuple("result", ["leaves"])
+ if not fake_node.dir:
+ return resultclass([fake_node])
+
+ return resultclass(fake_node.leaves)
+
+
+def fake_etcd_node(node, visited):
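+ """Recursively convert a dict describing an etcd node into a namedtuple, recording each node in visited by key."""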
+ min_req_fields = ["dir", "key"]
+ fields = list(node)
+ leaves = []
+
+ if node["dir"] and node.get("leaves"):
+ for leaf in node["leaves"]:
+ leaves.append(fake_etcd_node(leaf, visited))
+
+ if len(set(min_req_fields) - set(fields)) > 0:
+ raise ValueError("fake etcd nodes require at least {} fields.".format(min_req_fields))
+
+ if node.get("leaves"):
+ node["leaves"] = leaves
+
+ nodeclass = namedtuple("node", fields)
+ nodeinst = nodeclass(**node)
+ visited[nodeinst.key] = nodeinst
+
+ return nodeinst
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+])
+def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
+ task_vars = dict(
+ ansible_mounts=ansible_mounts,
+ )
+ check = EtcdImageDataSize(fake_execute_module, task_vars)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run()
+
+ for word in 'determine valid etcd mountpath'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('ansible_mounts,tree,size_limit,should_fail,extra_words', [
+ (
+ # test that the default image size limit evaluates to 1/2 * (total size in use)
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "1234"},
+ None,
+ False,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 48 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "1234"},
+ None,
+ False,
+ [],
+ ),
+ (
+ # set max size limit for image data to be below total node value
+ # total node value is defined as the sum of the value field
+ # from every node
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 48 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "12345678"},
+ 7,
+ True,
+ ["exceeds the maximum recommended limit", "0.00 GB"],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 48 * 10**9 - 1,
+ 'size_total': 48 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "1234"},
+ None,
+ True,
+ ["exceeds the maximum recommended limit", "0.00 GB"],
+ )
+])
+def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size_limit, should_fail, extra_words):
+ def execute_module(module_name, module_args, *_):
+ if module_name != "etcdkeysize":
+ return {
+ "changed": False,
+ }
+
+ client = fake_etcd_client(tree)
+ s, limit_exceeded = check_etcd_key_size(client, tree["key"], module_args["size_limit_bytes"])
+
+ return {"size_limit_exceeded": limit_exceeded}
+
+ task_vars = dict(
+ etcd_max_image_data_size_bytes=size_limit,
+ ansible_mounts=ansible_mounts,
+ openshift=dict(
+ master=dict(etcd_hosts=["localhost"]),
+ common=dict(config_base="/var/lib/origin")
+ )
+ )
+ if size_limit is None:
+ task_vars.pop("etcd_max_image_data_size_bytes")
+
+ check = EtcdImageDataSize(execute_module, task_vars).run()
+
+ if should_fail:
+ assert check["failed"]
+
+ for word in extra_words:
+ assert word in check["msg"]
+ else:
+ assert not check.get("failed", False)
+
+
+@pytest.mark.parametrize('ansible_mounts,tree,root_path,expected_size,extra_words', [
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test recursive size check on tree with height > 1
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1234"},
+ {"dir": False, "key": "/foo2", "value": "1234"},
+ {"dir": False, "key": "/foo3", "value": "1234"},
+ {"dir": False, "key": "/foo4", "value": "1234"},
+ {
+ "dir": True,
+ "key": "/foo5",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar1", "value": "56789"},
+ {"dir": False, "key": "/foo/bar2", "value": "56789"},
+ {"dir": False, "key": "/foo/bar3", "value": "56789"},
+ {
+ "dir": True,
+ "key": "/foo/bar4",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar/baz1", "value": "123"},
+ {"dir": False, "key": "/foo/bar/baz2", "value": "123"},
+ ]
+ },
+ ]
+ },
+ ]
+ },
+ "/",
+ 37,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test correct sub-tree size calculation
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1234"},
+ {"dir": False, "key": "/foo2", "value": "1234"},
+ {"dir": False, "key": "/foo3", "value": "1234"},
+ {"dir": False, "key": "/foo4", "value": "1234"},
+ {
+ "dir": True,
+ "key": "/foo5",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar1", "value": "56789"},
+ {"dir": False, "key": "/foo/bar2", "value": "56789"},
+ {"dir": False, "key": "/foo/bar3", "value": "56789"},
+ {
+ "dir": True,
+ "key": "/foo/bar4",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar/baz1", "value": "123"},
+ {"dir": False, "key": "/foo/bar/baz2", "value": "123"},
+ ]
+ },
+ ]
+ },
+ ]
+ },
+ "/foo5",
+ 21,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test that a non-existing key is handled correctly
+ {
+ "dir": False,
+ "key": "/",
+ "value": "1234",
+ },
+ "/missing",
+ 0,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test etcd cycle handling
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1234"},
+ {"dir": False, "key": "/foo2", "value": "1234"},
+ {"dir": False, "key": "/foo3", "value": "1234"},
+ {"dir": False, "key": "/foo4", "value": "1234"},
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1"},
+ ],
+ },
+ ]
+ },
+ "/",
+ 16,
+ [],
+ ),
+])
+def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_path, expected_size, extra_words):
+ def execute_module(module_name, module_args, *_):
+ if module_name != "etcdkeysize":
+ return {
+ "changed": False,
+ }
+
+ client = fake_etcd_client(tree)
+ size, limit_exceeded = check_etcd_key_size(client, root_path, module_args["size_limit_bytes"])
+
+ assert size == expected_size
+ return {
+ "size_limit_exceeded": limit_exceeded,
+ }
+
+ task_vars = dict(
+ ansible_mounts=ansible_mounts,
+ openshift=dict(
+ master=dict(etcd_hosts=["localhost"]),
+ common=dict(config_base="/var/lib/origin")
+ )
+ )
+
+ check = EtcdImageDataSize(execute_module, task_vars).run()
+ assert not check.get("failed", False)
+
+
+def test_etcdkeysize_module_failure():
+ def execute_module(module_name, *_):
+ if module_name != "etcdkeysize":
+ return {
+ "changed": False,
+ }
+
+ return {
+ "rc": 1,
+ "module_stderr": "failure",
+ }
+
+ task_vars = dict(
+ ansible_mounts=[{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ openshift=dict(
+ master=dict(etcd_hosts=["localhost"]),
+ common=dict(config_base="/var/lib/origin")
+ )
+ )
+
+ check = EtcdImageDataSize(execute_module, task_vars).run()
+
+ assert check["failed"]
+ for word in "Failed to retrieve stats":
+ assert word in check["msg"]
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
new file mode 100644
index 000000000..f4316c423
--- /dev/null
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -0,0 +1,74 @@
+import pytest
+
+from openshift_checks.etcd_traffic import EtcdTraffic
+
+
+@pytest.mark.parametrize('group_names,version,is_active', [
+ (['masters'], "3.5", False),
+ (['masters'], "3.6", False),
+ (['nodes'], "3.4", False),
+ (['etcd'], "3.4", True),
+ (['etcd'], "3.5", True),
+ (['etcd'], "3.1", False),
+ (['masters', 'nodes'], "3.5", False),
+ (['masters', 'etcd'], "3.5", True),
+ ([], "3.4", False),
+])
+def test_is_active(group_names, version, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(
+ common=dict(short_version=version),
+ ),
+ )
+ assert EtcdTraffic(task_vars=task_vars).is_active() == is_active
+
+
+@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
+ (["masters"], True, True, ["Higher than normal", "traffic"]),
+ (["masters", "etcd"], False, False, []),
+ (["etcd"], False, False, []),
+])
+def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
+ def execute_module(module_name, *_):
+ return {
+ "matched": matched,
+ "failed": failed,
+ }
+
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(
+ common=dict(service_type="origin", is_containerized=False),
+ )
+ )
+
+ result = EtcdTraffic(execute_module, task_vars).run()
+
+ for word in extra_words:
+ assert word in result.get("msg", "")
+
+ assert result.get("failed", False) == failed
+
+
+@pytest.mark.parametrize('is_containerized,expected_unit_value', [
+ (False, "etcd"),
+ (True, "etcd_container"),
+])
+def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value):
+ task_vars = dict(
+ openshift=dict(
+ common=dict(is_containerized=is_containerized),
+ )
+ )
+
+ def execute_module(module_name, args, *_):
+ assert module_name == "search_journalctl"
+ matchers = args["log_matchers"]
+
+ for matcher in matchers:
+ assert matcher["unit"] == expected_unit_value
+
+ return {"failed": False}
+
+ EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/etcd_volume_test.py b/roles/openshift_health_checker/test/etcd_volume_test.py
new file mode 100644
index 000000000..0b255136e
--- /dev/null
+++ b/roles/openshift_health_checker/test/etcd_volume_test.py
@@ -0,0 +1,146 @@
+import pytest
+
+from openshift_checks.etcd_volume import EtcdVolume, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+ task_vars = dict(
+ ansible_mounts=ansible_mounts,
+ )
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ EtcdVolume(fake_execute_module, task_vars).run()
+
+ for word in 'Unable to find etcd storage mount point'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('size_limit,ansible_mounts', [
+ (
+ # if no size limit is specified, expect max usage
+ # limit to default to 90% of size_total
+ None,
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9
+ }],
+ ),
+ (
+ 1,
+ [{
+ 'mount': '/',
+ 'size_available': 30 * 10**9,
+ 'size_total': 30 * 10**9,
+ }],
+ ),
+ (
+ 20000000000,
+ [{
+ 'mount': '/',
+ 'size_available': 20 * 10**9,
+ 'size_total': 40 * 10**9,
+ }],
+ ),
+ (
+ 5000000000,
+ [{
+ # not enough space on / ...
+ 'mount': '/',
+ 'size_available': 0,
+ 'size_total': 0,
+ }, {
+ # not enough space on /var/lib ...
+ 'mount': '/var/lib',
+ 'size_available': 2 * 10**9,
+ 'size_total': 21 * 10**9,
+ }, {
+ # ... but enough on /var/lib/etcd
+ 'mount': '/var/lib/etcd',
+ 'size_available': 36 * 10**9,
+ 'size_total': 40 * 10**9
+ }],
+ )
+])
+def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
+ task_vars = dict(
+ etcd_device_usage_threshold_percent=size_limit,
+ ansible_mounts=ansible_mounts,
+ )
+
+ if task_vars["etcd_device_usage_threshold_percent"] is None:
+ task_vars.pop("etcd_device_usage_threshold_percent")
+
+ result = EtcdVolume(fake_execute_module, task_vars).run()
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('size_limit_percent,ansible_mounts,extra_words', [
+ (
+ # if no size limit is specified, expect max usage
+ # limit to default to 90% of size_total
+ None,
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9,
+ 'size_total': 100 * 10**9,
+ }],
+ ['99.0%'],
+ ),
+ (
+ 70.0,
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**6,
+ 'size_total': 5 * 10**9,
+ }],
+ ['100.0%'],
+ ),
+ (
+ 40.0,
+ [{
+ 'mount': '/',
+ 'size_available': 2 * 10**9,
+ 'size_total': 6 * 10**9,
+ }],
+ ['66.7%'],
+ ),
+ (
+ None,
+ [{
+ # enough space on /var ...
+ 'mount': '/var',
+ 'size_available': 20 * 10**9,
+ 'size_total': 20 * 10**9,
+ }, {
+ # ... but not enough on /var/lib
+ 'mount': '/var/lib',
+ 'size_available': 1 * 10**9,
+ 'size_total': 20 * 10**9,
+ }],
+ ['95.0%'],
+ ),
+])
+def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts, extra_words):
+ task_vars = dict(
+ etcd_device_usage_threshold_percent=size_limit_percent,
+ ansible_mounts=ansible_mounts,
+ )
+
+ if task_vars["etcd_device_usage_threshold_percent"] is None:
+ task_vars.pop("etcd_device_usage_threshold_percent")
+
+ result = EtcdVolume(fake_execute_module, task_vars).run()
+
+ assert result['failed']
+ for word in extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/fluentd_test.py b/roles/openshift_health_checker/test/fluentd_test.py
new file mode 100644
index 000000000..9cee57868
--- /dev/null
+++ b/roles/openshift_health_checker/test/fluentd_test.py
@@ -0,0 +1,109 @@
+import pytest
+import json
+
+from openshift_checks.logging.fluentd import Fluentd
+
+
+def canned_fluentd(exec_oc=None):
+ """Create a Fluentd check object with canned exec_oc method"""
+ check = Fluentd("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+fluentd_pod_node2_down = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-2",
+ },
+ "spec": {"host": "node2", "nodeName": "node2"},
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ }
+}
+fluentd_node1 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
+ "name": "node1",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
+}
+fluentd_node2 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
+ "name": "node2",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
+}
+fluentd_node3_unlabeled = {
+ "metadata": {
+ "labels": {"kubernetes.io/hostname": "hostname"},
+ "name": "node3",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
+}
+
+
+@pytest.mark.parametrize('pods, nodes, expect_error', [
+ (
+ [],
+ [],
+ 'No nodes appear to be defined',
+ ),
+ (
+ [],
+ [fluentd_node3_unlabeled],
+ 'There are no nodes with the fluentd label',
+ ),
+ (
+ [],
+ [fluentd_node1, fluentd_node3_unlabeled],
+ 'Fluentd will not aggregate logs from these nodes.',
+ ),
+ (
+ [],
+ [fluentd_node2],
+ "nodes are supposed to have a Fluentd pod but do not",
+ ),
+ (
+ [fluentd_pod_node1, fluentd_pod_node1],
+ [fluentd_node1],
+ 'more Fluentd pods running than nodes labeled',
+ ),
+ (
+ [fluentd_pod_node2_down],
+ [fluentd_node2],
+ "Fluentd pods are supposed to be running",
+ ),
+ (
+ [fluentd_pod_node1],
+ [fluentd_node1],
+ None,
+ ),
+])
+def test_get_fluentd_pods(pods, nodes, expect_error):
+ check = canned_fluentd(exec_oc=lambda cmd, args: json.dumps(dict(items=nodes)))
+
+ error = check.check_fluentd(pods)
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
new file mode 100644
index 000000000..3a880d300
--- /dev/null
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -0,0 +1,218 @@
+import pytest
+import json
+
+try:
+ import urllib2
+ from urllib2 import HTTPError, URLError
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks.logging.kibana import Kibana
+
+
+def canned_kibana(exec_oc=None):
+ """Create a Kibana check object with canned exec_oc method"""
+ check = Kibana() # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+not_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "There are no Kibana pods deployed",
+ ),
+ (
+ [plain_kibana_pod],
+ None,
+ ),
+ (
+ [not_running_kibana_pod],
+ "No Kibana pod is in a running state",
+ ),
+ (
+ [plain_kibana_pod, not_running_kibana_pod],
+ "The following Kibana pods are not currently in a running state",
+ ),
+])
+def test_check_kibana(pods, expect_error):
+ check = canned_kibana()
+ error = check.check_kibana(pods)
+ assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('route, expect_url, expect_error', [
+ (
+ None,
+ None,
+ 'no_route_exists',
+ ),
+
+ # test route with no ingress
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [],
+ },
+ "spec": {
+ "host": "hostname",
+ }
+ },
+ None,
+ 'route_not_accepted',
+ ),
+
+ # test route with no host
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {},
+ },
+ None,
+ 'route_missing_host',
+ ),
+
+ # test route that looks fine
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {
+ "host": "hostname",
+ },
+ },
+ "https://hostname/",
+ None,
+ ),
+])
+def test_get_kibana_url(route, expect_url, expect_error):
+ check = canned_kibana(exec_oc=lambda cmd, args: json.dumps(route) if route else "")
+
+ url, error = check._get_kibana_url()
+ if expect_url:
+ assert url == expect_url
+ else:
+ assert not url
+ if expect_error:
+ assert error == expect_error
+ else:
+ assert not error
+
+
+@pytest.mark.parametrize('exec_result, expect', [
+ (
+ 'urlopen error [Errno 111] Connection refused',
+ 'at least one router routing to it?',
+ ),
+ (
+ 'urlopen error [Errno -2] Name or service not known',
+ 'DNS configured for the Kibana hostname?',
+ ),
+ (
+ 'Status code was not [302]: HTTP Error 500: Server error',
+ 'did not return the correct status code',
+ ),
+ (
+ 'bork bork bork',
+ 'bork bork bork', # should pass through
+ ),
+])
+def test_verify_url_internal_failure(exec_result, expect):
+ check = Kibana(execute_module=lambda *_: dict(failed=True, msg=exec_result))
+ check._get_kibana_url = lambda: ('url', None)
+
+ error = check._check_kibana_route()
+ assert_error(error, expect)
+
+
+@pytest.mark.parametrize('lib_result, expect', [
+ (
+ HTTPError('url', 500, "it broke", hdrs=None, fp=None),
+ 'it broke',
+ ),
+ (
+ URLError('it broke'),
+ 'it broke',
+ ),
+ (
+ 302,
+ 'returned the wrong error code',
+ ),
+ (
+ 200,
+ None,
+ ),
+])
+def test_verify_url_external_failure(lib_result, expect, monkeypatch):
+
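+ # minimal stand-in for the urlopen response object; only getcode() is used by the check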
+ class _http_return:
+
+ def __init__(self, code):
+ self.code = code
+
+ def getcode(self):
+ return self.code
+
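+ # fake urlopen: return a canned response for an integer result, otherwise raise the canned exception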
+ def urlopen(url, context):
+ if type(lib_result) is int:
+ return _http_return(lib_result)
+ raise lib_result
+ monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+
+ check = canned_kibana()
+ check._get_kibana_url = lambda: ('url', None)
+ check._verify_url_internal = lambda url: None
+
+ error = check._check_kibana_route()
+ assert_error(error, expect)
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
new file mode 100644
index 000000000..6f1697ee6
--- /dev/null
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -0,0 +1,165 @@
+import pytest
+import json
+
+from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+logging_namespace = "logging"
+
+
+def canned_loggingcheck(exec_oc=None):
+ """Create a LoggingCheck object with canned exec_oc method"""
+ check = LoggingCheck() # fails if a module is actually invoked
+ check.logging_namespace = 'logging'
+ if exec_oc:
+ check.exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+plain_kibana_pod_no_containerstatus = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('problem, expect', [
+ ("[Errno 2] No such file or directory", "supposed to be a master"),
+ ("Permission denied", "Unexpected error using `oc`"),
+])
+def test_oc_failure(problem, expect):
+ def execute_module(module_name, *_):
+ if module_name == "ocutil":
+ return dict(failed=True, result=problem)
+ return dict(changed=False)
+
+ check = LoggingCheck(execute_module, task_vars_config_base)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_oc(logging_namespace, 'get foo', [])
+ assert expect in str(excinfo)
+
+
+groups_with_first_master = dict(masters=['this-host', 'other-host'])
+groups_with_second_master = dict(masters=['other-host', 'this-host'])
+groups_not_a_master = dict(masters=['other-host'])
+
+
+@pytest.mark.parametrize('groups, logging_deployed, is_active', [
+ (groups_with_first_master, True, True),
+ (groups_with_first_master, False, False),
+ (groups_not_a_master, True, False),
+ (groups_with_second_master, True, False),
+ (groups_not_a_master, True, False),
+])
+def test_is_active(groups, logging_deployed, is_active):
+ task_vars = dict(
+ ansible_ssh_host='this-host',
+ groups=groups,
+ openshift_hosted_logging_deploy=logging_deployed,
+ )
+
+ assert LoggingCheck(None, task_vars).is_active() == is_active
+
+
+@pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
+ (
+ 'No resources found.',
+ None,
+ 'No pods were found for the "es"',
+ ),
+ (
+ json.dumps({'items': [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1]}),
+ [plain_es_pod],
+ None,
+ ),
+])
+def test_get_pods_for_component(pod_output, expect_pods, expect_error):
+ check = canned_loggingcheck(lambda namespace, cmd, args: pod_output)
+ pods, error = check.get_pods_for_component(
+ logging_namespace,
+ "es",
+ )
+ assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('name, pods, expected_pods', [
+ (
+ 'test single pod found, scheduled, but no containerStatuses field',
+ [plain_kibana_pod_no_containerstatus],
+ [plain_kibana_pod_no_containerstatus],
+ ),
+ (
+ 'set of pods has at least one pod with containerStatuses (scheduled); should still fail',
+ [plain_kibana_pod_no_containerstatus, plain_kibana_pod],
+ [plain_kibana_pod_no_containerstatus],
+ ),
+
+], ids=lambda argvals: argvals[0])
+def test_get_not_running_pods_no_container_status(name, pods, expected_pods):
+ check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: '')
+ result = check.not_running_pods(pods)
+
+ assert result == expected_pods
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
new file mode 100644
index 000000000..178d7cd84
--- /dev/null
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -0,0 +1,170 @@
+import json
+
+import pytest
+
+from openshift_checks.logging.logging_index_time import LoggingIndexTime, OpenShiftCheckException
+
+
+SAMPLE_UUID = "unique-test-uuid"
+
+
+def canned_loggingindextime(exec_oc=None):
+ """Create a check object with a canned exec_oc method"""
+ check = LoggingIndexTime() # fails if a module is actually invoked
+ if exec_oc:
+ check.exec_oc = exec_oc
+ return check
+
+
+plain_running_elasticsearch_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-data-master"},
+ "name": "logging-es-data-master-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "phase": "Running",
+ }
+}
+plain_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "phase": "Running",
+ }
+}
+not_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "phase": "pending",
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_pods', [
+ (
+ [not_running_kibana_pod],
+ [],
+ ),
+ (
+ [plain_running_kibana_pod],
+ [plain_running_kibana_pod],
+ ),
+ (
+ [],
+ [],
+ )
+])
+def test_check_running_pods(pods, expect_pods):
+ check = canned_loggingindextime()
+ pods = check.running_pods(pods)
+ assert pods == expect_pods
+
+
+@pytest.mark.parametrize('name, json_response, uuid, timeout, extra_words', [
+ (
+ 'valid count in response',
+ {
+ "count": 1,
+ },
+ SAMPLE_UUID,
+ 0.001,
+ [],
+ ),
+], ids=lambda argval: argval[0])
+def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout, extra_words):
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
+
+
+@pytest.mark.parametrize('name, json_response, uuid, timeout, extra_words', [
+ (
+ 'invalid json response',
+ {
+ "invalid_field": 1,
+ },
+ SAMPLE_UUID,
+ 0.001,
+ ["invalid response", "Elasticsearch"],
+ ),
+ (
+ 'empty response',
+ {},
+ SAMPLE_UUID,
+ 0.001,
+ ["invalid response", "Elasticsearch"],
+ ),
+ (
+ 'valid response but invalid match count',
+ {
+ "count": 0,
+ },
+ SAMPLE_UUID,
+ 0.005,
+ ["expecting match", SAMPLE_UUID, "0.005s"],
+ )
+], ids=lambda argval: argval[0])
+def test_wait_until_cmd_or_err(name, json_response, uuid, timeout, extra_words):
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
+
+ for word in extra_words:
+ assert word in str(error)
+
+
+@pytest.mark.parametrize('name, json_response, uuid, extra_words', [
+ (
+ 'correct response code, found unique id is returned',
+ {
+ "statusCode": 404,
+ },
+ "sample unique id",
+ ["sample unique id"],
+ ),
+], ids=lambda argval: argval[0])
+def test_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check.generate_uuid = lambda: uuid
+
+ result = check.curl_kibana_with_uuid(plain_running_kibana_pod)
+
+ for word in extra_words:
+ assert word in result
+
+
+@pytest.mark.parametrize('name, json_response, uuid, extra_words', [
+ (
+ 'invalid json response',
+ {
+ "invalid_field": "invalid",
+ },
+ SAMPLE_UUID,
+ ["invalid response returned", 'Missing "statusCode" key'],
+ ),
+ (
+ 'wrong error code in response',
+ {
+ "statusCode": 500,
+ },
+ SAMPLE_UUID,
+ ["Expecting error code", "500"],
+ ),
+], ids=lambda argval: argval[0])
+def test_failed_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check.generate_uuid = lambda: uuid
+
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.curl_kibana_with_uuid(plain_running_kibana_pod)
+
+ for word in extra_words:
+ assert word in str(error)
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
new file mode 100644
index 000000000..aee2f0416
--- /dev/null
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -0,0 +1,127 @@
+import pytest
+
+from openshift_checks.memory_availability import MemoryAvailability
+
+
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
+])
+def test_is_active(group_names, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ )
+ assert MemoryAvailability(None, task_vars).is_active() == is_active
+
+
+@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
+ (
+ ['masters'],
+ 0,
+ 17200,
+ ),
+ (
+ ['nodes'],
+ 0,
+ 8200,
+ ),
+ (
+ ['nodes'],
+ 1, # configure lower threshold
+ 2000, # too low for recommended but not for configured
+ ),
+ (
+ ['nodes'],
+ 2, # configure threshold where adjustment pushes it over
+ 1900,
+ ),
+ (
+ ['etcd'],
+ 0,
+ 8200,
+ ),
+ (
+ ['masters', 'nodes'],
+ 0,
+ 17000,
+ ),
+])
+def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_memtotal_mb):
+ task_vars = dict(
+ group_names=group_names,
+ openshift_check_min_host_memory_gb=configured_min,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ result = MemoryAvailability(fake_execute_module, task_vars).run()
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [
+ (
+ ['masters'],
+ 0,
+ 0,
+ ['0.0 GiB'],
+ ),
+ (
+ ['nodes'],
+ 0,
+ 100,
+ ['0.1 GiB'],
+ ),
+ (
+ ['nodes'],
+ 24, # configure higher threshold
+ 20 * 1024, # enough to meet recommended but not configured
+ ['20.0 GiB'],
+ ),
+ (
+ ['nodes'],
+ 24, # configure higher threshold
+ 22 * 1024, # not enough for adjustment to push over threshold
+ ['22.0 GiB'],
+ ),
+ (
+ ['etcd'],
+ 0,
+ 6 * 1024,
+ ['6.0 GiB'],
+ ),
+ (
+ ['etcd', 'masters'],
+ 0,
+ 9 * 1024, # enough memory for etcd, not enough for a master
+ ['9.0 GiB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ 0,
+ # enough memory for a node, not enough for a master
+ 11 * 1024,
+ ['11.0 GiB'],
+ ),
+])
+def test_fails_with_insufficient_memory(group_names, configured_min, ansible_memtotal_mb, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ openshift_check_min_host_memory_gb=configured_min,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ result = MemoryAvailability(fake_execute_module, task_vars).run()
+
+ assert result.get('failed', False)
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
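
The "adjustment" mentioned in the comments above is only visible through the chosen numbers (1900 MiB passes a configured 2 GiB minimum, 22 GiB fails a 24 GiB one). A rough illustration of the comparison those cases imply: a per-group recommended minimum, an optional `openshift_check_min_host_memory_gb` override, and a fixed allowance added to `ansible_memtotal_mb` because that fact under-reports physical memory. The recommended values and the ~1 GiB allowance are assumptions inferred from the test data, not the check's actual code:

```python
# Illustrative sketch only: reproduces the pass/fail boundaries used above.
GIB = 1024  # MiB per GiB

RECOMMENDED_GIB = {'masters': 16, 'nodes': 8, 'etcd': 8}  # assumed minimums
MEMTOTAL_ALLOWANCE_MIB = 1 * GIB  # assumed slack for under-reported memtotal


def memory_ok(group_names, ansible_memtotal_mb, configured_min_gb=0):
    """Return True when the host meets the effective minimum for its groups."""
    recommended = max(RECOMMENDED_GIB.get(g, 0) for g in group_names)
    min_gib = configured_min_gb or recommended
    return ansible_memtotal_mb + MEMTOTAL_ALLOWANCE_MIB >= min_gib * GIB


assert memory_ok(['nodes'], 1900, configured_min_gb=2)            # allowance helps
assert not memory_ok(['nodes'], 22 * GIB, configured_min_gb=24)   # allowance not enough
assert not memory_ok(['etcd', 'masters'], 9 * GIB)                # master minimum wins
```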
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
index 2d83e207d..b1a41ca3c 100644
--- a/roles/openshift_health_checker/test/mixins_test.py
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -14,10 +14,10 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
(dict(openshift=dict(common=dict(is_containerized=True))), False),
])
def test_is_active(task_vars, expected):
- assert NotContainerizedCheck.is_active(task_vars) == expected
+ assert NotContainerizedCheck(None, task_vars).is_active() == expected
def test_is_active_missing_task_vars():
with pytest.raises(OpenShiftCheckException) as excinfo:
- NotContainerizedCheck.is_active(task_vars={})
+ NotContainerizedCheck().is_active()
assert 'is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index e3153979c..43aa875f4 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -1,7 +1,7 @@
import pytest
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
-from openshift_checks import load_checks, get_var
+from openshift_checks import load_checks
# Fixtures
@@ -28,34 +28,23 @@ def test_OpenShiftCheck_init():
name = "test_check"
run = NotImplemented
- # initialization requires at least one argument (apart from self)
- with pytest.raises(TypeError) as excinfo:
- TestCheck()
+ # execute_module required at init if it will be used
+ with pytest.raises(RuntimeError) as excinfo:
+ TestCheck().execute_module("foo")
assert 'execute_module' in str(excinfo.value)
- assert 'module_executor' in str(excinfo.value)
execute_module = object()
# initialize with positional argument
check = TestCheck(execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check._execute_module == execute_module
- # initialize with keyword argument, recommended name
+ # initialize with keyword argument
check = TestCheck(execute_module=execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check._execute_module == execute_module
- # initialize with keyword argument, deprecated name
- check = TestCheck(module_executor=execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check.task_vars == {}
+ assert check.tmp is None
def test_subclasses():
@@ -81,19 +70,27 @@ def test_load_checks():
assert modules
+def dummy_check(task_vars):
+ class TestCheck(OpenShiftCheck):
+ name = "dummy"
+ run = NotImplemented
+
+ return TestCheck(task_vars=task_vars)
+
+
@pytest.mark.parametrize("keys,expected", [
(("foo",), 42),
(("bar", "baz"), "openshift"),
])
def test_get_var_ok(task_vars, keys, expected):
- assert get_var(task_vars, *keys) == expected
+ assert dummy_check(task_vars).get_var(*keys) == expected
def test_get_var_error(task_vars, missing_keys):
with pytest.raises(OpenShiftCheckException):
- get_var(task_vars, *missing_keys)
+ dummy_check(task_vars).get_var(*missing_keys)
def test_get_var_default(task_vars, missing_keys):
default = object()
- assert get_var(task_vars, *missing_keys, default=default) == default
+ assert dummy_check(task_vars).get_var(*missing_keys, default=default) == default
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
new file mode 100644
index 000000000..b6acef5a6
--- /dev/null
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -0,0 +1,86 @@
+import pytest
+
+from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
+
+
+def test_openshift_version_not_supported():
+ def execute_module(*_):
+ return {}
+
+ openshift_release = '111.7.0'
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
+ )
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ OvsVersion(execute_module, task_vars).run()
+
+ assert "no recommended version of Open vSwitch" in str(excinfo.value)
+
+
+def test_invalid_openshift_release_format():
+ def execute_module(*_):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_image_tag='v0',
+ openshift_deployment_type='origin',
+ )
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ OvsVersion(execute_module, task_vars).run()
+ assert "invalid version" in str(excinfo.value)
+
+
+@pytest.mark.parametrize('openshift_release,expected_ovs_version', [
+ ("3.5", "2.6"),
+ ("3.6", "2.6"),
+ ("3.4", "2.4"),
+ ("3.3", "2.4"),
+ ("1.0", "2.4"),
+])
+def test_ovs_package_version(openshift_release, expected_ovs_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, *_):
+ assert module_name == 'rpm_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "openvswitch":
+ assert pkg["version"] == expected_ovs_version
+
+ return return_value
+
+ result = OvsVersion(execute_module, task_vars).run()
+ assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert OvsVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 25385339a..1fe648b75 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -3,6 +3,20 @@ import pytest
from openshift_checks.package_availability import PackageAvailability
+@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+ ('yum', False, True),
+ ('yum', True, False),
+ ('dnf', True, False),
+ ('dnf', False, False),
+])
+def test_is_active(pkg_mgr, is_containerized, is_active):
+ task_vars = dict(
+ ansible_pkg_mgr=pkg_mgr,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageAvailability(None, task_vars).is_active() == is_active
+
+
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
dict(openshift=dict(common=dict(service_type='openshift'))),
@@ -37,13 +51,12 @@ from openshift_checks.package_availability import PackageAvailability
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
return return_value
- check = PackageAvailability(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = PackageAvailability(execute_module, task_vars).run()
assert result is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 5e000cff5..06489b0d7 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -4,13 +4,12 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
return return_value
- check = PackageUpdate(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=None)
+ result = PackageUpdate(execute_module).run()
assert result is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index cc1d263bc..1ddb9cecb 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,21 +1,124 @@
-from openshift_checks.package_version import PackageVersion
+import pytest
+from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('openshift_release, extra_words', [
+ ('111.7.0', ["no recommended version of Open vSwitch"]),
+ ('0.0.0', ["no recommended version of Docker"]),
+])
+def test_openshift_version_not_supported(openshift_release, extra_words):
+ def execute_module(*_):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
+ )
+
+ check = PackageVersion(execute_module, task_vars)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run()
+
+ for word in extra_words:
+ assert word in str(excinfo.value)
+
+
+def test_invalid_openshift_release_format():
+ def execute_module(*_):
+ return {}
-def test_package_version():
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
- openshift_release='v3.5',
+ openshift_image_tag='v0',
+ openshift_deployment_type='origin',
+ )
+
+ check = PackageVersion(execute_module, task_vars)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run()
+ assert "invalid version" in str(excinfo.value)
+
+
+@pytest.mark.parametrize('openshift_release', [
+ "3.5",
+ "3.6",
+ "3.4",
+ "3.3",
+])
+def test_package_version(openshift_release):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
)
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
- assert 'prefix' in module_args
- assert 'version' in module_args
- assert module_args['prefix'] == task_vars['openshift']['common']['service_type']
- assert module_args['version'] == task_vars['openshift_release']
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if "-master" in pkg["name"] or "-node" in pkg["name"]:
+ assert pkg["version"] == task_vars["openshift_release"]
+
return return_value
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ check = PackageVersion(execute_module, task_vars)
+ result = check.run()
assert result is return_value
+
+
+@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
+ ("origin", "3.5", "1.12"),
+ ("openshift-enterprise", "3.4", "1.12"),
+ ("origin", "3.3", "1.10"),
+ ("openshift-enterprise", "3.2", "1.10"),
+ ("origin", "3.1", "1.8"),
+ ("openshift-enterprise", "3.1", "1.8"),
+])
+def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type=deployment_type,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, *_):
+ assert module_name == 'aos_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "docker":
+ assert pkg["version"] == expected_docker_version
+
+ return return_value
+
+ check = PackageVersion(execute_module, task_vars)
+ result = check.run()
+ assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/rpm_version_test.py b/roles/openshift_health_checker/test/rpm_version_test.py
new file mode 100644
index 000000000..2f09ef965
--- /dev/null
+++ b/roles/openshift_health_checker/test/rpm_version_test.py
@@ -0,0 +1,82 @@
+import pytest
+import rpm_version
+
+expected_pkgs = {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ },
+}
+
+
+@pytest.mark.parametrize('pkgs, expect_not_found', [
+ (
+ {},
+ ["spam", "eggs"], # none found
+ ),
+ (
+ {"spam": ["3.2.1", "4.5.1"]},
+ ["eggs"], # completely missing
+ ),
+ (
+ {
+ "spam": ["3.2.1", "4.5.1"],
+ "eggs": ["3.2.1"],
+ },
+ [], # all found
+ ),
+])
+def test_check_pkg_found(pkgs, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(rpm_version.RpmVersionException) as e:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+ assert "not found to be installed" in str(e.value)
+ assert set(expect_not_found) == set(e.value.problem_pkgs)
+ else:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+
+@pytest.mark.parametrize('pkgs, expect_not_found', [
+ (
+ {
+ 'spam': ['3.2.1'],
+ 'eggs': ['3.3.2'],
+ },
+ {
+ "eggs": {
+ "required_version": "3.2",
+ "found_versions": ["3.3"],
+ }
+ }, # not the right version
+ ),
+ (
+ {
+ 'spam': ['3.1.2', "3.3.2"],
+ 'eggs': ['3.3.2', "1.2.3"],
+ },
+ {
+ "eggs": {
+ "required_version": "3.2",
+ "found_versions": ["3.3", "1.2"],
+ },
+ "spam": {
+ "required_version": "3.2",
+ "found_versions": ["3.1", "3.3"],
+ }
+ }, # not the right version
+ ),
+])
+def test_check_pkg_version_found(pkgs, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(rpm_version.RpmVersionException) as e:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+ assert "found to be installed with an incorrect version" in str(e.value)
+ assert expect_not_found == e.value.problem_pkgs
+ else:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
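
The second group of cases compares versions only to major.minor precision: an installed `3.3.2` is reported as `3.3` against a required `3.2`. A hypothetical sketch of that comparison, covering only the version-mismatch path (the "not installed" path is handled separately) and matching the expected `problem_pkgs` structure, but not the module's own `_check_pkg_versions` code:

```python
# Hypothetical sketch of major.minor matching as implied by the cases above.
def truncate(version, places=2):
    """'3.3.2' -> '3.3' (compare only the first two version fields)."""
    return '.'.join(version.split('.')[:places])


def check_pkg_versions(found_pkgs, expected_pkgs):
    """Return {name: {...}} for installed packages whose major.minor differs."""
    problems = {}
    for name, expected in expected_pkgs.items():
        found_versions = [truncate(v) for v in found_pkgs.get(name, [])]
        required = truncate(expected['version'])
        if found_versions and required not in found_versions:
            problems[name] = {
                'required_version': required,
                'found_versions': found_versions,
            }
    return problems


# e.g. eggs 3.3.2 installed while 3.2.1 is expected -> flagged as '3.3' vs '3.2'
print(check_pkg_versions({'spam': ['3.2.1'], 'eggs': ['3.3.2']},
                         {'spam': {'name': 'spam', 'version': '3.2.1'},
                          'eggs': {'name': 'eggs', 'version': '3.2.1'}}))
```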
diff --git a/roles/openshift_health_checker/test/search_journalctl_test.py b/roles/openshift_health_checker/test/search_journalctl_test.py
new file mode 100644
index 000000000..724928aa1
--- /dev/null
+++ b/roles/openshift_health_checker/test/search_journalctl_test.py
@@ -0,0 +1,157 @@
+import pytest
+import search_journalctl
+
+
+def canned_search_journalctl(get_log_output=None):
+ """Create a search_journalctl object with canned get_log_output method"""
+ module = search_journalctl
+ if get_log_output:
+ module.get_log_output = get_log_output
+ return module
+
+
+DEFAULT_TIMESTAMP = 1496341364
+
+
+def get_timestamp(modifier=0):
+ return DEFAULT_TIMESTAMP + modifier
+
+
+def get_timestamp_microseconds(modifier=0):
+ return get_timestamp(modifier) * 1000000
+
+
+def create_test_log_object(stamp, msg):
+ return '{{"__REALTIME_TIMESTAMP": "{}", "MESSAGE": "{}"}}'.format(stamp, msg)
+
+
+@pytest.mark.parametrize('name,matchers,log_input,expected_matches,expected_errors', [
+ (
+ 'test with valid params',
+ [
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"test log message",
+ "unit": "test",
+ },
+ ],
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
+ ],
+ ["test log message"],
+ [],
+ ),
+ (
+ 'test with invalid json in log input',
+ [
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"test log message",
+ "unit": "test-unit",
+ },
+ ],
+ [
+ '{__REALTIME_TIMESTAMP: ' + str(get_timestamp_microseconds()) + ', "MESSAGE": "test log message"}',
+ ],
+ [],
+ [
+ ["invalid json", "test-unit", "test log message"],
+ ],
+ ),
+ (
+ 'test with invalid regexp',
+ [
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"test [ log message",
+ "unit": "test",
+ },
+ ],
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
+ ],
+ [],
+ [
+ ["invalid regular expression"],
+ ],
+ ),
+], ids=lambda argval: argval[0])
+def test_get_log_matches(name, matchers, log_input, expected_matches, expected_errors):
+ def get_log_output(matcher):
+ return log_input
+
+ module = canned_search_journalctl(get_log_output)
+ matched_regexp, errors = module.get_log_matches(matchers, 500, 60 * 60)
+
+ assert set(matched_regexp) == set(expected_matches)
+ assert len(expected_errors) == len(errors)
+
+ for idx, partial_err_set in enumerate(expected_errors):
+ for partial_err_msg in partial_err_set:
+ assert partial_err_msg in errors[idx]
+
+
+@pytest.mark.parametrize('name,matcher,log_count_lim,stamp_lim_seconds,log_input,expected_match', [
+ (
+ 'test with matching log message, but out of bounds of log_count_lim',
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"dummy log message",
+ "unit": "test",
+ },
+ 3,
+ get_timestamp(-100 * 60 * 60),
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
+ ],
+ None,
+ ),
+ (
+ 'test with matching log message, but with timestamp too old',
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"dummy log message",
+ "unit": "test",
+ },
+ 100,
+ get_timestamp(-10),
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "Sample Logs Beginning"),
+ ],
+ None,
+ ),
+ (
+ 'test with matching log message, and timestamp within time limit',
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"dummy log message",
+ "unit": "test",
+ },
+ 100,
+ get_timestamp(-1010),
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "Sample Logs Beginning"),
+ ],
+ create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
+ ),
+], ids=lambda argval: argval[0])
+def test_find_matches_skips_logs(name, matcher, log_count_lim, stamp_lim_seconds, log_input, expected_match):
+ match = search_journalctl.find_matches(log_input, matcher, log_count_lim, stamp_lim_seconds)
+ assert match == expected_match
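
The three cases above exercise three stop conditions: a log-count limit, a timestamp cutoff (seconds, compared against the entry's `__REALTIME_TIMESTAMP` in microseconds), and the `start_regexp` sentinel that marks the beginning of a unit's logs. A rough sketch of a scan that reproduces those outcomes; it illustrates the semantics the cases imply and is not the module's own `find_matches` implementation:

```python
import json
import re

# Illustrative only: scan journal entries (newest first) until a match, the
# start-of-logs sentinel, the count limit, or the timestamp cutoff is hit.
def find_match(log_entries, matcher, log_count_limit, start_time_seconds):
    regexp = re.compile(matcher['regexp'])
    start_regexp = re.compile(matcher['start_regexp'])

    for count, entry in enumerate(log_entries):
        if count >= log_count_limit:
            return None  # scanned as far back as allowed
        obj = json.loads(entry)
        if int(obj['__REALTIME_TIMESTAMP']) / 1000000.0 < start_time_seconds:
            return None  # entry is older than the window we care about
        if regexp.match(obj['MESSAGE']):
            return entry
        if start_regexp.match(obj['MESSAGE']):
            return None  # reached the beginning of this unit's logs
    return None
```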
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 6d576df71..3e5d7f860 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -28,6 +28,14 @@ From this role:
| openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
| openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
+If you specify `openshift_hosted_registry_kind=glusterfs`, the following
+variables also control configuration behavior:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume |
+
Dependencies
------------
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index d73f339f7..0391e5602 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -24,8 +24,14 @@ openshift_hosted_routers:
ports:
- 80:80
- 443:443
- certificates: "{{ openshift_hosted_router_certificate | default({}) }}"
+ certificate: "{{ openshift_hosted_router_certificate | default({}) }}"
-openshift_hosted_router_certificates: {}
+openshift_hosted_router_certificate: {}
openshift_hosted_registry_cert_expire_days: 730
+openshift_hosted_router_create_certificate: True
+
+os_firewall_allow:
+- service: Docker Registry Port
+ port: 5000/tcp
+ when: openshift.common.use_calico | bool
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 9626c23c1..9e3f37130 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -15,3 +15,8 @@ dependencies:
- role: openshift_cli
- role: openshift_hosted_facts
- role: lib_openshift
+- role: os_firewall
+ os_firewall_allow:
+ - service: Docker Registry Port
+ port: 5000/tcp
+ when: openshift.common.use_calico | bool
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 0b8042473..b946ec8ca 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -56,12 +56,17 @@
openshift_hosted_registry_force:
- False
+- name: Update registry environment variables when pushing via DNS
+ set_fact:
+ openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
+ when: openshift_push_via_dns | default(false) | bool
+
- name: Create the registry service account
oc_serviceaccount:
name: "{{ openshift_hosted_registry_serviceaccount }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
-- name: Grant the registry serivce account access to the appropriate scc
+- name: Grant the registry service account access to the appropriate scc
oc_adm_policy_user:
user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
@@ -109,7 +114,7 @@
type: persistentVolumeClaim
claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
@@ -123,3 +128,36 @@
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+
+- name: Ensure OpenShift registry correctly rolls out (best-effort today)
+ command: |
+ oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \
+ --namespace {{ openshift_hosted_registry_namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig
+ async: 600
+ poll: 15
+ failed_when: false
+
+- name: Determine the latest version of the OpenShift registry deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
+ --namespace {{ openshift_hosted_registry_namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: openshift_hosted_registry_latest_version
+
+- name: Sanity-check that the OpenShift registry rolled out correctly
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
+ --namespace {{ openshift_hosted_registry_namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_registry_rc_phase
+ until: "'Running' not in openshift_hosted_registry_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"
+
+- include: storage/glusterfs.yml
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index 8a159bf73..29c164f52 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -53,7 +53,8 @@
signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
hostnames:
- "{{ docker_registry_service_ip.results.clusterip }}"
- - docker-registry.default.svc.cluster.local
+ - "{{ openshift_hosted_registry_name }}.default.svc"
+ - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}"
- "{{ docker_registry_route_hostname }}"
cert: "{{ openshift_master_config_dir }}/registry.crt"
key: "{{ openshift_master_config_dir }}/registry.key"
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
new file mode 100644
index 000000000..c2954fde1
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -0,0 +1,92 @@
+---
+- name: Get registry DeploymentConfig
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: dc
+ name: "{{ openshift_hosted_registry_name }}"
+ register: registry_dc
+
+- name: Wait for registry pods
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: pod
+ selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
+ register: registry_pods
+ until:
+ - "registry_pods.results.results[0]['items'] | count > 0"
+ # There must be as many matching pods with 'Ready' status True as there are expected replicas
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ delay: 10
+ retries: "{{ (600 / 10) | int }}"
+
+- name: Determine registry fsGroup
+ set_fact:
+ openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}"
+
+- name: Create temp mount directory
+ command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Mount registry volume
+ mount:
+ state: mounted
+ fstype: glusterfs
+ src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% else %}{% set node = groups.glusterfs[0] %}{% endif %}{% if 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ name: "{{ mktemp.stdout }}"
+
+- name: Set registry volume permissions
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: directory
+ group: "{{ openshift_hosted_registry_fsgroup }}"
+ mode: "2775"
+ recurse: True
+
+- block:
+ - name: Activate registry maintenance mode
+ oc_env:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ env_vars:
+ - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+
+ - name: Get first registry pod name
+ set_fact:
+ registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}"
+
+ - name: Copy current registry contents to new GlusterFS volume
+ command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
+ when: openshift.hosted.registry.storage.glusterfs.swapcopy
+
+ - name: Swap new GlusterFS registry volume
+ oc_volume:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ vol_name: registry-storage
+ mount_type: pvc
+ claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+
+ - name: Deactivate registry maintenance mode
+ oc_env:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ state: absent
+ env_vars:
+ - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Unmount registry volume
+ mount:
+ state: unmounted
+ name: "{{ mktemp.stdout }}"
+
+- name: Delete temp mount directory
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 3dde83bee..8aaba0f3c 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,20 +1,4 @@
---
-- name: Assert supported openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider in ['azure_blob', 's3', 'swift']
- msg: >
- Object Storage Provider: "{{ openshift.hosted.registry.storage.provider }}"
- is not currently supported
-
-- name: Assert implemented openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider not in ['azure_blob', 'swift']
- msg: >
- Support for provider: "{{ openshift.hosted.registry.storage.provider }}"
- not implemented yet
-
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/registry/storage/s3.yml
index 26f921f15..318969885 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/s3.yml
@@ -2,14 +2,10 @@
- name: Assert that S3 variables are provided for registry_config template
assert:
that:
- - openshift.hosted.registry.storage.s3.accesskey | default(none) is not none
- - openshift.hosted.registry.storage.s3.secretkey | default(none) is not none
- openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- openshift.hosted.registry.storage.s3.region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
- openshift_hosted_registry_storage_s3_accesskey
- openshift_hosted_registry_storage_s3_secretkey
openshift_hosted_registry_storage_s3_bucket
openshift_hosted_registry_storage_s3_region
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 0861b9ec2..dd485a64a 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -14,13 +14,39 @@
openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+# This is for when we desire a cluster-signed cert
+# The certificate is generated and placed in master_config_dir/
+- block:
+ - name: generate a default wildcard router certificate
+ oc_adm_ca_server_cert:
+ signer_cert: "{{ openshift_master_config_dir }}/ca.crt"
+ signer_key: "{{ openshift_master_config_dir }}/ca.key"
+ signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+ hostnames:
+ - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+ - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+ cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
+ key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
+ with_items: "{{ openshift_hosted_routers }}"
+
+ - name: set the openshift_hosted_router_certificate
+ set_fact:
+ openshift_hosted_router_certificate:
+ certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
+ keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
+ cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
+
+ # End Block
+ when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
+
- name: Get the certificate contents for router
copy:
backup: True
dest: "/etc/origin/master/{{ item | basename }}"
src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') |
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ when: not openshift_hosted_router_create_certificate | bool
- name: Create the router service account(s)
oc_serviceaccount:
@@ -29,7 +55,7 @@
state: present
with_items: "{{ openshift_hosted_routers }}"
-- name: Grant the router serivce account(s) access to the appropriate scc
+- name: Grant the router service account(s) access to the appropriate scc
oc_adm_policy_user:
user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}"
namespace: "{{ item.namespace }}"
@@ -56,25 +82,44 @@
service_account: "{{ item.serviceaccount | default('router') }}"
selector: "{{ item.selector | default(none) }}"
images: "{{ item.images | default(omit) }}"
- cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}"
- key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}"
- cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}"
+ cert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else omit }}"
+ key_file: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else omit }}"
+ cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.cafile | basename)) if 'cafile' in item.certificate else omit }}"
edits: "{{ openshift_hosted_router_edits | union(item.edits) }}"
ports: "{{ item.ports }}"
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
- register: routerout
-# This should probably move to module
-- name: wait for deploy
- pause:
- seconds: 30
- when: routerout.changed
+- name: Ensure OpenShift router correctly rolls out (best-effort today)
+ command: |
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace | default('default') }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig
+ async: 600
+ poll: 15
+ with_items: "{{ openshift_hosted_routers }}"
+ failed_when: false
-- name: Ensure router replica count matches desired
- oc_scale:
- kind: dc
- name: "{{ item.name | default('router') }}"
- namespace: "{{ item.namespace | default('default') }}"
- replicas: "{{ item.replicas }}"
+- name: Determine the latest version of the OpenShift router deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: openshift_hosted_routers_latest_version
with_items: "{{ openshift_hosted_routers }}"
+
+- name: Poll for OpenShift router deployment success
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ --namespace {{ item.0.namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_router_rc_phase
+ until: "'Running' not in openshift_hosted_router_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout"
+ with_together:
+ - "{{ openshift_hosted_routers }}"
+ - "{{ openshift_hosted_routers_latest_version.results }}"
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index ca6a23f21..fc9272679 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -10,14 +10,21 @@ storage:
blobdescriptor: inmemory
{% if openshift_hosted_registry_storage_provider | default('') == 's3' %}
s3:
+{% if openshift_hosted_registry_storage_s3_accesskey is defined %}
accesskey: {{ openshift_hosted_registry_storage_s3_accesskey }}
+{% endif %}
+{% if openshift_hosted_registry_storage_s3_secretkey is defined %}
secretkey: {{ openshift_hosted_registry_storage_s3_secretkey }}
+{% endif %}
region: {{ openshift_hosted_registry_storage_s3_region }}
{% if openshift_hosted_registry_storage_s3_regionendpoint is defined %}
regionendpoint: {{ openshift_hosted_registry_storage_s3_regionendpoint }}
{% endif %}
bucket: {{ openshift_hosted_registry_storage_s3_bucket }}
- encrypt: false
+ encrypt: {{ openshift_hosted_registry_storage_s3_encrypt | default(false) }}
+{% if openshift_hosted_registry_storage_s3_kmskeyid is defined %}
+ keyid: {{ openshift_hosted_registry_storage_s3_kmskeyid }}
+{% endif %}
secure: true
v4auth: true
rootdirectory: {{ openshift_hosted_registry_storage_s3_rootdirectory | default('/registry') }}
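
The template change makes `accesskey`/`secretkey` optional (presumably for deployments where credentials come from the environment rather than the inventory) and adds optional `encrypt`/`keyid` handling. A small sketch using the `jinja2` library to render an abbreviated stand-in for that fragment and show which keys survive for a given set of inventory variables; the fragment here is simplified, not the full registry_config.j2:

```python
# Renders a simplified stand-in for the s3: block of registry_config.j2 to
# show how undefined variables drop their keys from the generated config.
from jinja2 import Template

S3_FRAGMENT = """\
s3:
{% if openshift_hosted_registry_storage_s3_accesskey is defined %}
  accesskey: {{ openshift_hosted_registry_storage_s3_accesskey }}
{% endif %}
  region: {{ openshift_hosted_registry_storage_s3_region }}
  bucket: {{ openshift_hosted_registry_storage_s3_bucket }}
  encrypt: {{ openshift_hosted_registry_storage_s3_encrypt | default(false) }}
{% if openshift_hosted_registry_storage_s3_kmskeyid is defined %}
  keyid: {{ openshift_hosted_registry_storage_s3_kmskeyid }}
{% endif %}
"""

# Sample values only. With no accesskey and no KMS key id, those lines are
# simply omitted, and encrypt falls back to false.
print(Template(S3_FRAGMENT, trim_blocks=True).render(
    openshift_hosted_registry_storage_s3_region='us-east-1',
    openshift_hosted_registry_storage_s3_bucket='registry-bucket',
))
```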
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
index 12ffe777d..680303853 100644
--- a/roles/openshift_hosted_logging/README.md
+++ b/roles/openshift_hosted_logging/README.md
@@ -22,7 +22,7 @@
- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should use (label=value)
- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should use (label=value)
- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
-- openshift_hosted_logging_use_journal: If this is unset or empty, logging will try to figure out from docker which log driver it is using (json-file or journald). You can set this param to "true" or "false" to force logging to use journal or not (but make sure you are sure which one docker is using).
+- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE*
- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location.
- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources.
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index afd82766f..78b624109 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -36,7 +36,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+ failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
- name: "Create templates for logging accounts and the deployer"
command: >
@@ -60,21 +60,21 @@
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+ failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
- name: "Set permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
- name: "Set additional permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
command: >
@@ -82,13 +82,13 @@
policy add-cluster-role-to-user rolebinding-reader \
system:serviceaccount:logging:aggregated-logging-elasticsearch
register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+ failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
- name: "Create ConfigMap for deployer parameters"
command: >
{{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+ failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
- name: "Process the deployer template"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
index 33320e9c8..4b350b244 100644
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -26,8 +26,7 @@ kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_
cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}"
cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}"
ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}"
-use_journal_cmap_param: "{{ '--from-literal use-journal=' ~ openshift_hosted_logging_use_journal | string | lower | quote if openshift_hosted_logging_use_journal | default(none) is not none else '' }}"
journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}"
journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}"
ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}"
-deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ use_journal_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
+deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 6a442cefc..15dd1bd54 100644
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -81,7 +81,7 @@
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
changed_when: metrics_deployer_secret.rc == 0
- failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+ failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
# TODO: extend this to allow user passed in certs or generating cert with
# OpenShift CA
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index c67058696..5abb2ef83 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -223,7 +223,7 @@ items:
-
description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
name: IMAGE_PULL_SECRET
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
index 6ead122c5..1d319eab8 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
@@ -105,7 +105,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -118,7 +118,7 @@ parameters:
description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
name: MODE
value: "deploy"
--
+-
description: "Set to true to continue even if the deployer runs into an error."
name: CONTINUE_ON_ERROR
value: "false"
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 28feac4e6..8bf98ba41 100644
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -103,9 +103,9 @@ parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
value: "registry.access.redhat.com/openshift3/"
- - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.5", set version "3.5"'
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.6", set version "v3.6"'
name: IMAGE_VERSION
- value: "3.5"
+ value: "v3.6"
- description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
name: OPENSHIFT_OAUTH_PROVIDER_URL
required: true
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..80cc4233b 100644
--- a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
diff --git a/roles/openshift_loadbalancer/README.md b/roles/openshift_loadbalancer/README.md
index bea4c509b..330895f20 100644
--- a/roles/openshift_loadbalancer/README.md
+++ b/roles/openshift_loadbalancer/README.md
@@ -25,6 +25,7 @@ From this role:
| openshift_loadbalancer_default_maxconn | 20000 | Maximum per-process number of concurrent connections. |
| openshift_loadbalancer_frontends | none | List of frontends. See example below. |
| openshift_loadbalancer_backends | none | List of backends. See example below. |
+| openshift_image_tag | none | Image tag for the containerized haproxy image. |
Dependencies
------------
@@ -64,6 +65,7 @@ Example Playbook
- name: master3
address: "192.168.122.223:8443"
opts: check
+ openshift_image_tag: v3.6.153
```
License
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 5385df3b7..72182fcdd 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 42f4fc72e..d2ef7cc71 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -52,9 +52,12 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
-- `openshift_logging_fluentd_use_journal`: NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver when using the default of empty.
+- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
+- `openshift_logging_fluentd_buffer_queue_limit`: Buffer queue limit for Fluentd. Defaults to 1024.
+- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.
+
- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
- `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'.
@@ -91,11 +94,69 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
-- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
-- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
+
+Elasticsearch can be exposed to clients outside of the cluster.
+- `openshift_logging_es_allow_external`: True (default is False) - if this is
+ True, Elasticsearch will be exposed as a Route
+- `openshift_logging_es_hostname`: The external facing hostname to use for
+ the route and the TLS server certificate (default is "es." +
+ `openshift_master_default_subdomain`)
+- `openshift_logging_es_cert`: The location of the certificate Elasticsearch
+ uses for the external TLS server cert (default is a generated cert)
+- `openshift_logging_es_key`: The location of the key Elasticsearch
+ uses for the external TLS server cert (default is a generated key)
+- `openshift_logging_es_ca_ext`: The location of the CA cert for the cert
+ Elasticsearch uses for the external TLS server cert (default is the internal
+ CA)
+The same options exist for Elasticsearch OPS, if an OPS cluster is in use:
+- `openshift_logging_es_ops_allow_external`: True (default is False) - if this is
+ True, Elasticsearch will be exposed as a Route
+- `openshift_logging_es_ops_hostname`: The external facing hostname to use for
+ the route and the TLS server certificate (default is "es-ops." +
+ `openshift_master_default_subdomain`)
+- `openshift_logging_es_ops_cert`: The location of the certificate Elasticsearch
+ uses for the external TLS server cert (default is a generated cert)
+- `openshift_logging_es_ops_key`: The location of the key Elasticsearch
+ uses for the external TLS server cert (default is a generated key)
+- `openshift_logging_es_ops_ca_ext`: The location of the CA cert for the cert
+ Elasticsearch uses for the external TLS server cert (default is the internal
+ CA)
+
+### mux - secure_forward listener service
+- `openshift_logging_use_mux`: Default `False`. If this is `True`, a service
+ called `mux` will be deployed. This service will act as a Fluentd
+ secure_forward forwarder for the node agent Fluentd daemonsets running in the
+  cluster. This can be used to reduce the number of connections to the
+  OpenShift API server by configuring each node Fluentd to send raw logs to
+  `mux` and turning off the k8s metadata plugin.
+- `openshift_logging_mux_allow_external`: Default `False`. If this is `True`,
+ the `mux` service will be deployed, and it will be configured to allow
+ Fluentd clients running outside of the cluster to send logs using
+ secure_forward. This allows OpenShift logging to be used as a central
+ logging service for clients other than OpenShift, or other OpenShift
+ clusters.
+- `openshift_logging_use_mux_client`: Default `False`. If this is `True`, the
+ node agent Fluentd services will be configured to send logs to the mux
+ service rather than directly to Elasticsearch.
+- `openshift_logging_mux_hostname`: Default is "mux." +
+  `openshift_master_default_subdomain`. This is the hostname *external*
+ clients will use to connect to mux, and will be used in the TLS server cert
+ subject.
+- `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_cpu_limit`: 100m
+- `openshift_logging_mux_memory_limit`: 512Mi
+- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
+ first value in the list is the namespace to use for undefined projects,
+ followed by any additional namespaces to create by default - users will
+ typically not need to set this
+- `openshift_logging_mux_namespaces`: Default `[]` - additional namespaces to
+ create for _external_ mux clients to associate with their logs - users will
+ need to set this
+- `openshift_logging_mux_buffer_queue_limit`: Default `[1024]` - Buffer queue limit for Mux.
+- `openshift_logging_mux_buffer_size_limit`: Default `[1m]` - Buffer chunk limit for Mux.
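
A minimal sketch of how the external-Elasticsearch and mux variables documented above might be combined in an inventory vars file; the hostnames are illustrative and only variables introduced in this diff are used:

```yaml
# group_vars/OSEv3.yml -- illustrative values only
openshift_logging_install_logging: True

# expose Elasticsearch to clients outside the cluster via a route
openshift_logging_es_allow_external: True
openshift_logging_es_hostname: es.apps.example.com

# deploy the mux secure_forward listener and have node Fluentd use it
openshift_logging_use_mux: True
openshift_logging_use_mux_client: True
openshift_logging_mux_hostname: mux.apps.example.com
openshift_logging_mux_port: 24284
```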
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 96ed44011..1c243f934 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
+openshift_logging_nodeselector: null
+openshift_logging_labels: {}
+openshift_logging_label_key: ""
+openshift_logging_label_value: ""
openshift_logging_install_logging: True
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
@@ -22,10 +26,10 @@ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_cpu_limit: null
-openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -46,10 +50,10 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_ops_cpu_limit: null
-openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
-openshift_logging_kibana_ops_proxy_memory_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: 96Mi
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -68,10 +72,11 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
+openshift_logging_fluentd_buffer_queue_limit: 1024
+openshift_logging_fluentd_buffer_size_limit: 1m
openshift_logging_es_host: logging-es
openshift_logging_es_port: 9200
@@ -83,7 +88,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: null
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
@@ -95,6 +100,22 @@ openshift_logging_es_config: {}
openshift_logging_es_number_of_shards: 1
openshift_logging_es_number_of_replicas: 0
+# for exposing es to external (outside of the cluster) clients
+openshift_logging_es_allow_external: False
+openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+#The absolute path on the control node to the cert file to use
+#for the public facing es certs
+openshift_logging_es_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing es certs
+openshift_logging_es_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing es certs
+openshift_logging_es_ca_ext: ""
+
# allow cluster-admin or cluster-reader to view operations index
openshift_logging_es_ops_allow_cluster_reader: False
@@ -106,19 +127,47 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: None
+openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_es_ops_number_of_shards: 1
-openshift_logging_es_ops_number_of_replicas: 0
+
+# for exposing es-ops to external (outside of the cluster) clients
+openshift_logging_es_ops_allow_external: False
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+#The absolute path on the control node to the cert file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_ca_ext: ""
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+# mux - secure_forward listener service
+openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
+# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
+openshift_logging_use_mux_client: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+openshift_logging_mux_cpu_limit: 500m
+openshift_logging_mux_memory_limit: 1Gi
+# the namespace to use for undefined projects should come first, followed by any
+# additional namespaces to create by default - users will typically not need to set this
+openshift_logging_mux_default_namespaces: ["mux-undefined"]
+# extra namespaces to create for mux clients - users will need to set this
+openshift_logging_mux_namespaces: []
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
@@ -127,3 +176,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
#fluentd_config_contents:
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
+#fluentd_mux_config_contents:
+#fluentd_mux_secureforward_contents:
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
deleted file mode 100644
index 334c9402b..000000000
--- a/roles/openshift_logging/files/logging-deployer-sa.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: logging-deployer
-secrets:
-- name: logging-deployer
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 44b0b2d48..eac086e81 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -42,7 +42,7 @@ def map_from_pairs(source, delim="="):
if source == '':
return dict()
- return dict(source.split(delim) for item in source.split(","))
+ return dict(item.split(delim) for item in source.split(","))
# pylint: disable=too-few-public-methods
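
The one-character fix above makes `map_from_pairs` split each comma-separated item on the delimiter instead of re-splitting the whole source string. A small sketch of how the corrected filter behaves when used from a task, assuming the role's `filter_plugins` directory is on the filter path:

```yaml
# illustrative only: exercise the fixed map_from_pairs filter
- name: show a node selector parsed from a pairs string
  debug:
    msg: "{{ 'region=infra,zone=default' | map_from_pairs }}"
  # with the fix this yields {'region': 'infra', 'zone': 'default'};
  # before it, multi-pair strings raised a ValueError because the whole
  # source string was split rather than each item
```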
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
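
The two handlers added above only fire on native-HA masters, where the API and controllers run as separate systemd units; they are triggered by notification from whichever task changes master configuration. A hedged sketch of such a task (the template name is hypothetical):

```yaml
# hypothetical task that would notify the new handlers
- name: update master config affecting logging
  template:
    src: master-config.yaml.j2        # hypothetical template
    dest: /etc/origin/master/master-config.yaml
  notify:
  - restart master api
  - restart master controllers
```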
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 64bc33435..35accfb78 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -37,7 +37,7 @@ LOGGING_INFRA_KEY = "logging-infra"
# selectors for filtering resources
DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
-ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
+ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
@@ -318,7 +318,7 @@ def main():
''' The main method '''
module = AnsibleModule( # noqa: F405
argument_spec=dict(
- admin_kubeconfig={"required": True, "type": "str"},
+ admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
oc_bin={"required": True, "type": "str"},
openshift_logging_namespace={"required": True, "type": "str"}
),
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 188ea246c..6d023a02d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -1,113 +1,97 @@
---
-- name: stop logging
- include: stop_cluster.yaml
-
# delete the deployment objects that we had created
- name: delete logging api objects
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: "{{ item }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra"
with_items:
- dc
- rc
- svc
- routes
- templates
- - daemonset
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
+ - ds
# delete the oauthclient
- name: delete oauthclient kibana-proxy
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ oc_obj:
+ state: absent
+ kind: oauthclient
+ namespace: "{{ openshift_logging_namespace }}"
+ name: kibana-proxy
# delete any image streams that we may have created
- name: delete logging is
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ oc_obj:
+ state: absent
+ kind: is
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra=support"
# delete our old secrets
- name: delete logging secrets
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_secret:
+ state: absent
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- logging-fluentd
- logging-elasticsearch
- logging-kibana
- logging-kibana-proxy
- logging-curator
- ignore_errors: yes
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete role bindings
-- name: delete rolebindings
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - logging-elasticsearch-view-role
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster role bindings
-- name: delete cluster role bindings
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - rolebinding-reader
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster roles
-- name: delete cluster roles
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - rolebinding-reader
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ - logging-mux
# delete our service accounts
- name: delete service accounts
oc_serviceaccount:
- name: "{{ item }}"
- namespace: "{{ openshift_logging_namespace }}"
state: absent
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- aggregated-logging-elasticsearch
- aggregated-logging-kibana
- aggregated-logging-curator
- aggregated-logging-fluentd
-# delete our roles
-- name: delete roles
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+# delete role bindings
+- name: delete rolebindings
+ oc_obj:
+ state: absent
+ kind: rolebinding
+ namespace: "{{ openshift_logging_namespace }}"
+ name: logging-elasticsearch-view-role
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ oc_obj:
+ state: absent
+ kind: clusterrolebindings
+ namespace: "{{ openshift_logging_namespace }}"
+ name: rolebinding-reader
+
+# delete cluster roles
+- name: delete cluster roles
+ oc_obj:
+ state: absent
+ kind: clusterrole
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
+ - rolebinding-reader
- daemonset-admin
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
# delete our configmaps
- name: delete configmaps
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: configmap
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- logging-curator
- logging-elasticsearch
- logging-fluentd
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ - logging-mux
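
The rewritten teardown above replaces raw `oc ... delete` commands with the repository's `oc_obj`/`oc_secret` modules, which handle change reporting themselves instead of parsing stdout. The same pattern extends to other object kinds; a minimal illustrative sketch (the kind and selector here are examples, not part of the diff):

```yaml
# illustrative only: delete routes carrying the logging support label
- name: delete logging routes
  oc_obj:
    state: absent
    kind: route
    namespace: "{{ openshift_logging_namespace }}"
    selector: "logging-infra=support"
```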
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 740e490e1..9c8f0986a 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
- name: Generate certificates
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
--serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
check_mode: no
@@ -45,6 +45,39 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: mux
+ hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
+ when: openshift_logging_use_mux | bool
+
+- include: procure_shared_key.yaml
+ loop_control:
+ loop_var: shared_key_info
+ with_items:
+ - procure_component: mux
+ when: openshift_logging_use_mux | bool
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: es
+ hostnames: "es, {{openshift_logging_es_hostname}}"
+ when: openshift_logging_es_allow_external | bool
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: es-ops
+ hostnames: "es-ops, {{openshift_logging_es_ops_hostname}}"
+ when:
+ - openshift_logging_es_allow_external | bool
+ - openshift_logging_use_ops | bool
+
- name: Copy proxy TLS configuration file
copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
when: server_tls_json is undefined
@@ -85,6 +118,22 @@
loop_control:
loop_var: node_name
+- name: Generate PEM cert for mux
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.mux
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_use_mux | bool
+
+- name: Generate PEM cert for Elasticsearch external route
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.es
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_es_allow_external | bool
+
- name: Creating necessary JKS certs
include: generate_jks.yaml
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
deleted file mode 100644
index 56f590717..000000000
--- a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Generate ClusterRoleBindings
- template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
- vars:
- acct_name: aggregated-logging-elasticsearch
- obj_name: rolebinding-reader
- crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
- subjects:
- - kind: ServiceAccount
- name: "{{acct_name}}"
- namespace: "{{openshift_logging_namespace}}"
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
deleted file mode 100644
index 0b8b1014c..000000000
--- a/roles/openshift_logging/tasks/generate_clusterroles.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Generate ClusterRole for cluster-reader
- template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
- vars:
- obj_name: rolebinding-reader
- rules:
- - resources: [clusterrolebindings]
- verbs:
- - get
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
deleted file mode 100644
index 253543f54..000000000
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ /dev/null
@@ -1,136 +0,0 @@
----
-- block:
- - fail:
- msg: "The openshift_logging_es_log_appenders '{{openshift_logging_es_log_appenders}}' has an unrecognized option and only supports the following as a list: {{es_log_appenders | join(', ')}}"
- when:
- - es_logging_contents is undefined
- - "{{ openshift_logging_es_log_appenders | list | difference(es_log_appenders) | length != 0 }}"
- changed_when: no
-
- - template:
- src: elasticsearch-logging.yml.j2
- dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
- vars:
- root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
- when: es_logging_contents is undefined
- changed_when: no
- check_mode: no
-
- - local_action: >
- template src=elasticsearch.yml.j2
- dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
- vars:
- - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
- when: es_config_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{ config_source | combine(override_config,recursive=True) | to_nice_yaml }}"
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
- vars:
- config_source: "{{lookup('file','{{local_tmp.stdout}}/elasticsearch-gen-template.yml') | from_yaml }}"
- override_config: "{{openshift_logging_es_config | from_yaml}}"
- when: es_logging_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{es_logging_contents}}"
- dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
- when: es_logging_contents is defined
- changed_when: no
-
- - copy:
- content: "{{es_config_contents}}"
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
- when: es_config_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
- --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
- register: es_configmap
- changed_when: no
-
- - copy:
- content: "{{es_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
- when: es_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: curator.yml
- dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{curator_config_contents}}"
- dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
- --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
- register: curator_configmap
- changed_when: no
-
- - copy:
- content: "{{curator_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
- when: curator_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: fluent.conf
- dest: "{{mktemp.stdout}}/fluent.conf"
- when: fluentd_config_contents is undefined
- changed_when: no
-
- - copy:
- src: fluentd-throttle-config.yaml
- dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is undefined
- changed_when: no
-
- - copy:
- src: secure-forward.conf
- dest: "{{mktemp.stdout}}/secure-forward.conf"
- when: fluentd_securefoward_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{fluentd_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent.conf"
- when: fluentd_config_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_throttle_contents}}"
- dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward.conf"
- when: fluentd_secureforward_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
- --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
- --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
- register: fluentd_configmap
- changed_when: no
-
- - copy:
- content: "{{fluentd_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
- when: fluentd_configmap.stdout is defined
- changed_when: no
- check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
deleted file mode 100644
index 8aea4e81f..000000000
--- a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Generate kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
- vars:
- component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
- vars:
- component: kibana-ops
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- check_mode: no
- changed_when: no
-
-- name: Generate elasticsearch deploymentconfig
- template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
- vars:
- component: es
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-abc123"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS elasticsearch deploymentconfig
- template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
- vars:
- component: es-ops
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-abc123"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- check_mode: no
- changed_when: no
-
-- name: Generate curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
- vars:
- component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
- vars:
- component: curator-ops
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- openshift_logging_es_host: logging-es-ops
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
index 289b72ea6..e8cececfb 100644
--- a/roles/openshift_logging/tasks/generate_pems.yaml
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -15,6 +15,7 @@
-subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
when:
- not key_file.stat.exists
+ - cert_ext is defined
- cert_ext.stdout is defined
check_mode: no
@@ -24,7 +25,7 @@
-subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
when:
- not key_file.stat.exists
- - cert_ext.stdout is undefined
+ - cert_ext is undefined or cert_ext is defined and cert_ext.stdout is undefined
check_mode: no
- name: Sign cert request with CA for {{component}}
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
deleted file mode 100644
index fa7a86c27..000000000
--- a/roles/openshift_logging/tasks/generate_pvcs.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Init pool of PersistentVolumeClaim names
- set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
- vars:
- pvc_name: "{{es_pvc_prefix}}-{{item| int}}"
- start: "{{es_pvc_names | map('regex_search', es_pvc_prefix+'.*')|select('string')|list|length}}"
- with_sequence: start={{start}} end={{ (start|int > es_cluster_size|int - 1) | ternary(start, es_cluster_size|int - 1)}}
- when:
- - "{{ es_dc_names|default([]) | length <= es_cluster_size|int }}"
- - es_pvc_size | search('^\d.*')
- check_mode: no
-
-- name: Generating PersistentVolumeClaims
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{claim_name}}"
- size: "{{es_pvc_size}}"
- access_modes: "{{ es_access_modes | list }}"
- pv_selector: "{{es_pv_selector}}"
- with_items:
- - "{{es_pvc_pool | default([])}}"
- loop_control:
- loop_var: claim_name
- when:
- - not es_pvc_dynamic
- - es_pvc_pool is defined
- check_mode: no
- changed_when: no
-
-- name: Generating PersistentVolumeClaims - Dynamic
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{claim_name}}"
- annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
- size: "{{es_pvc_size}}"
- access_modes: "{{ es_access_modes | list }}"
- pv_selector: "{{es_pv_selector}}"
- with_items:
- - "{{es_pvc_pool|default([])}}"
- loop_control:
- loop_var: claim_name
- when:
- - es_pvc_dynamic
- - es_pvc_pool is defined
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
deleted file mode 100644
index 7dc9530df..000000000
--- a/roles/openshift_logging/tasks/generate_rolebindings.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Generate RoleBindings
- template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
- vars:
- obj_name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
deleted file mode 100644
index e77da7a24..000000000
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
- changed_when: false
-
-- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: "{{openshift_logging_kibana_cert | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: "{{openshift_logging_kibana_ca | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when: kibana_ca is not defined
- changed_when: false
-
-- name: Generating logging routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml
- tags: routes
- vars:
- obj_name: "logging-kibana"
- route_host: "{{openshift_logging_kibana_hostname}}"
- service_name: "logging-kibana"
- tls_key: "{{kibana_key | default('') | b64decode}}"
- tls_cert: "{{kibana_cert | default('') | b64decode}}"
- tls_ca_cert: "{{kibana_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- changed_when: no
-
-- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}"
- changed_when: false
-
-- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when:
- - openshift_logging_use_ops | bool
- - kibana_ops_ca is not defined
- changed_when: false
-
-- name: Generating logging ops routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml
- tags: routes
- vars:
- obj_name: "logging-kibana-ops"
- route_host: "{{openshift_logging_kibana_ops_hostname}}"
- service_name: "logging-kibana-ops"
- tls_key: "{{kibana_ops_key | default('') | b64decode}}"
- tls_cert: "{{kibana_ops_cert | default('') | b64decode}}"
- tls_ca_cert: "{{kibana_ops_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- when: openshift_logging_use_ops | bool
- changed_when: no
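
Note: the deleted route tasks read the Kibana TLS material with lookup('file', ...) | b64encode, fall back to the CA slurped into key_pairs, and b64decode everything again when feeding route_reencrypt.j2. That template is not shown here; the sketch below only illustrates the reencrypt route fields those vars map onto, with hypothetical values:

apiVersion: v1
kind: Route
metadata:
  name: logging-kibana                             # obj_name
spec:
  host: kibana.example.com                         # route_host (hypothetical)
  to:
    kind: Service
    name: logging-kibana                           # service_name
  tls:
    termination: reencrypt
    key: "...PEM key..."                           # tls_key
    certificate: "...PEM cert..."                  # tls_cert
    caCertificate: "...PEM CA..."                  # tls_ca_cert
    destinationCACertificate: "...PEM CA..."       # tls_dest_ca_cert
    insecureEdgeTerminationPolicy: Redirect        # edge_term_policy (hypothetical value)
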
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
deleted file mode 100644
index f396bcc6d..000000000
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-- name: Retrieving the cert to use when generating secrets for the logging components
- slurp: src="{{generated_certs_dir}}/{{item.file}}"
- register: key_pairs
- with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "kibana_key", file: "system.logging.kibana.key"}
- - { name: "kibana_cert", file: "system.logging.kibana.crt"}
- - { name: "curator_key", file: "system.logging.curator.key"}
- - { name: "curator_cert", file: "system.logging.curator.crt"}
- - { name: "fluentd_key", file: "system.logging.fluentd.key"}
- - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
- - { name: "kibana_internal_key", file: "kibana-internal.key"}
- - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
- - { name: "server_tls", file: "server-tls.json"}
-
-- name: Generating secrets for logging components
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: "logging-{{component}}"
- secret_key_file: "{{component}}_key"
- secret_cert_file: "{{component}}_cert"
- secrets:
- - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
- - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
- - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
- secret_keys: ["ca", "cert", "key"]
- with_items:
- - kibana
- - curator
- - fluentd
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
-
-- name: Generating secrets for kibana proxy
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: logging-kibana-proxy
- secrets:
- - {key: oauth-secret, value: "{{oauth_secret}}"}
- - {key: session-secret, value: "{{session_secret}}"}
- - {key: server-key, value: "{{kibana_key_file}}"}
- - {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls, value: "{{server_tls_file}}"}
- secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
- kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
- kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
- server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
- check_mode: no
- changed_when: no
-
-- name: Generating secrets for elasticsearch
- command: >
- {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
- key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
- searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
- admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
- admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
- vars:
- secret_name: logging-elasticsearch
- secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"]
- register: logging_es_secret
- check_mode: no
- changed_when: no
-
-- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
- when: logging_es_secret.stdout is defined
- check_mode: no
- changed_when: no
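
Note: the removed secret tasks all lean on one idiom: slurp returns file contents base64-encoded, each registered result keeps the item from its with_items entry, and the role's custom entry_from_named_pair filter (assumed to come from this role's filter plugins) pulls a named entry back out before b64decode recovers the plain text. A minimal sketch of that idiom on its own:

- name: Slurp named key material (contents come back base64-encoded)
  slurp: src="{{ generated_certs_dir }}/{{ item.file }}"
  register: key_pairs
  with_items:
  - { name: "ca_file", file: "ca.crt" }
  - { name: "kibana_key", file: "system.logging.kibana.key" }

- name: Recover the plain-text value of one named entry
  debug:
    msg: "{{ key_pairs | entry_from_named_pair('kibana_key') | b64decode }}"
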
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
deleted file mode 100644
index 21bcdfecb..000000000
--- a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Generating serviceaccounts
- template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
- vars:
- obj_name: aggregated-logging-{{component}}
- with_items:
- - elasticsearch
- - kibana
- - fluentd
- - curator
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
deleted file mode 100644
index 5091c1209..000000000
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
----
-- name: Generating logging-es service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
- vars:
- obj_name: logging-es
- ports:
- - {port: 9200, targetPort: restapi}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-cluster service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
- vars:
- obj_name: logging-es-cluster
- ports:
- - {port: 9300}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es
- check_mode: no
- changed_when: no
-
-- name: Generating logging-kibana service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
- vars:
- obj_name: logging-kibana
- ports:
- - {port: 443, targetPort: oaproxy}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: kibana
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-ops service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
- vars:
- obj_name: logging-es-ops
- ports:
- - {port: 9200, targetPort: restapi}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-ops-cluster service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
- vars:
- obj_name: logging-es-ops-cluster
- ports:
- - {port: 9300}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-kibana-ops service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
- vars:
- obj_name: logging-kibana-ops
- ports:
- - {port: 443, targetPort: oaproxy}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: kibana-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
deleted file mode 100644
index ab8e207f1..000000000
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Check Curator current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: curator_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Check Curator ops current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: curator_ops_replica_count
- when:
- - not ansible_check_mode
- - openshift_logging_use_ops | bool
- ignore_errors: yes
- changed_when: no
-
-- name: Generate curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
- vars:
- component: curator
- logging_component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
- curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
- replicas: "{{curator_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
- vars:
- component: curator-ops
- logging_component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
- curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
- replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}"
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
deleted file mode 100644
index 28fad420b..000000000
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ /dev/null
@@ -1,146 +0,0 @@
----
-- name: Getting current ES deployment size
- set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
-
-- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
-
-- set_fact: es_pvc_pool={{[]}}
-
-- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
-
-- name: Generate PersistentVolumeClaims
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
- vars:
- es_pv_selector: "{{openshift_logging_es_pv_selector}}"
- es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
- es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
- es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
- es_pvc_size: "{{openshift_logging_es_pvc_size}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- es_cluster_size: "{{openshift_logging_es_cluster_size}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
-
-# we should initialize the es_dc_pool with the current keys
-- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
- loop_control:
- loop_var: deploy_name
-
-# This should be used to generate new DC names if necessary
-- name: Create new DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
- vars:
- component: es
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
- check_mode: no
-
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: es
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_dc_pool }}"
- check_mode: no
- changed_when: no
-
-# --------- Tasks for Operation clusters ---------
-
-- name: Getting current ES deployment size
- set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
-
-- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}"
-
-- name: Validate Elasticsearch cluster size for Ops
- fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
- vars:
- es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
- check_mode: no
-
-- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
-
-- set_fact: es_pvc_pool={{[]}}
-
-- name: Generate PersistentVolumeClaims for Ops
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
- vars:
- es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
- es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
-
-- name: Init pool of DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
- loop_control:
- loop_var: deploy_name
- when:
- - openshift_logging_use_ops | bool
-
-- name: Create new DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
- vars:
- component: es-ops
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
-
-- name: Generate Elasticsearch DeploymentConfig for Ops
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: es-ops
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
- es_node_quorum: "{{es_ops_node_quorum}}"
- es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
- es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
- openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
- es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_ops_dc_pool | default([]) }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
deleted file mode 100644
index 35273829c..000000000
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
- check_mode: no
-
-- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
- check_mode: no
-
-- name: Generating Fluentd daemonset
- template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
- vars:
- daemonset_name: logging-fluentd
- daemonset_component: fluentd
- daemonset_container_name: fluentd-elasticsearch
- daemonset_serviceAccount: aggregated-logging-fluentd
- ops_host: "{{ fluentd_ops_host }}"
- ops_port: "{{ fluentd_ops_port }}"
- fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
- check_mode: no
- changed_when: no
-
-- name: "Check fluentd privileged permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get scc/privileged -o jsonpath='{.users}'
- register: fluentd_privileged
- check_mode: no
- changed_when: no
-
-- name: "Set privileged permissions for fluentd"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
- check_mode: no
- when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
-
-- name: "Check fluentd cluster-reader permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
- register: fluentd_cluster_reader
- check_mode: no
- changed_when: no
-
-- name: "Set cluster-reader permissions for fluentd"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
- check_mode: no
- when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
deleted file mode 100644
index 52bdeb50d..000000000
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Check Kibana current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: kibana_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Check Kibana ops current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: kibana_ops_replica_count
- when:
- - not ansible_check_mode
- - openshift_logging_use_ops | bool
- ignore_errors: yes
- changed_when: no
-
-
-- name: Generate kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
- vars:
- component: kibana
- logging_component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
- kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
- kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
- kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
- replicas: "{{kibana_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
- vars:
- component: kibana-ops
- logging_component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
- kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
- kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
- kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
- replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({})}}"
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 83b68fa77..464e8594f 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -2,85 +2,277 @@
- name: Gather OpenShift Logging Facts
openshift_logging_facts:
oc_bin: "{{openshift.common.client_binary}}"
- admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
openshift_logging_namespace: "{{openshift_logging_namespace}}"
- tags: logging_facts
+
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+ node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
+
+- name: Labeling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
+
+- name: Labeling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
+
+- name: Create logging cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/logging"
+ state: directory
+ mode: 0755
+ changed_when: False
check_mode: no
-- name: Validate Elasticsearch cluster size
- fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
- when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
-
-- name: Validate Elasticsearch Ops cluster size
- fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
- when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
-
-- name: Install logging
- include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
- when: openshift_hosted_logging_install | default(true) | bool
- with_items:
- - support
- - elasticsearch
- - kibana
- - curator
- - fluentd
- loop_control:
- loop_var: install_component
-
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
- register: object_def_files
- changed_when: no
-
-- slurp: src={{item}}
- register: object_defs
- with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
- changed_when: no
-
-- name: Create objects
- include: oc_apply.yaml
+- include: generate_certs.yaml
vars:
- - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- - namespace: "{{ openshift_logging_namespace }}"
- - file_name: "{{ file.source }}"
- - file_content: "{{ file.content | b64decode | from_yaml }}"
- with_items: "{{ object_defs.results }}"
- loop_control:
- loop_var: file
- when: not ansible_check_mode
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
-- include: update_master_config.yaml
+## Elasticsearch
-- name: Printing out objects to create
- debug: msg={{file.content | b64decode }}
- with_items: "{{ object_defs.results }}"
- loop_control:
- loop_var: file
- when: ansible_check_mode
-
- # TODO replace task with oc_secret module that supports
- # linking when available
-- name: Link Pull Secrets With Service Accounts
- include: oc_secret.yaml
+- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
+
+- set_fact: es_indices=[]
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count == 0
+
+- set_fact: openshift_logging_es_pvc_prefix="logging-es"
+ when: openshift_logging_es_pvc_prefix == ""
+
+- set_fact:
+ elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"
+
+# We don't allow scaling down of ES nodes currently
+- include_role:
+ name: openshift_logging_elasticsearch
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- subcommand: link
- service_account: "{{sa_account}}"
- secret_name: "{{openshift_logging_image_pull_secret}}"
- add_args: "--for=pull"
- with_items:
- - default
- - aggregated-logging-elasticsearch
- - aggregated-logging-kibana
- - aggregated-logging-fluentd
- - aggregated-logging-curator
- register: link_pull_secret
- loop_control:
- loop_var: sa_account
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch.pvcs }}"
+ - "{{ es_indices }}"
when:
- - openshift_logging_image_pull_secret is defined
- - openshift_logging_image_pull_secret != ''
- failed_when: link_pull_secret.rc != 0
+ - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
-- name: Scaling up cluster
- include: start_cluster.yaml
- when: start_cluster | default(true) | bool
+# Create any new DC that may be required
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+
+- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ when:
+ - openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
+
+- set_fact: es_ops_indices=[]
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count == 0
+
+- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
+ when: openshift_logging_es_ops_pvc_prefix == ""
+
+- set_fact:
+ elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}"
+ when:
+ - openshift_logging_use_ops | bool
+
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_ops_deployment: true
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
+ openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
+ openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
+ openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
+ openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
+ openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
+ - "{{ es_ops_indices }}"
+ when:
+ - openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
+
+# Create any new DC that may be required
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_ops_deployment: true
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
+ openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
+ openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
+ openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
+ openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
+ openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Kibana
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_ops_deployment: true
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
+ openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
+ openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
+ openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
+ openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
+ openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Curator
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_curator_es_port: "{{ openshift_logging_es_port }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_ops_deployment: true
+ openshift_logging_curator_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_curator_es_port: "{{ openshift_logging_es_ops_port }}"
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
+ when:
+ - openshift_logging_use_ops | bool
+
+## Mux
+- include_role:
+ name: openshift_logging_mux
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
+ openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ when:
+ - openshift_logging_use_mux | bool
+
+
+## Fluentd
+- include_role:
+ name: openshift_logging_fluentd
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
+ openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}"
+
+- include: update_master_config.yaml
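
Note: the rewritten install_logging.yaml calls the per-component roles directly; for the existing Elasticsearch deployments it zips three facts lists with with_together so that item.0 is the deploymentconfig name, item.1 its current PVC (possibly none), and item.2 the zero-based index used to build a PVC name. A small self-contained illustration of that loop shape, with placeholder data instead of the gathered facts:

- name: Illustrate the with_together indexing used above (placeholder data)
  debug:
    msg: "dc={{ item.0 }} pvc={{ 'logging-es-' ~ item.2 if item.1 is none else item.1 }}"
  with_together:
  - [ 'logging-es-abc123', 'logging-es-def456' ]   # deploymentconfig names (hypothetical)
  - [ 'logging-es-0', null ]                       # matching PVC names, None when absent
  - [ 0, 1 ]                                       # es_indices
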
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
deleted file mode 100644
index da0bbb627..000000000
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-# This is the base configuration for installing the other components
-- name: Check for logging project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
- register: logging_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
-
-- name: "Create logging project"
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
- when: not ansible_check_mode and "not found" in logging_project_result.stderr
-
-- name: Create logging cert directory
- file: path={{openshift.common.config_base}}/logging state=directory mode=0755
- changed_when: False
- check_mode: no
-
-- include: generate_certs.yaml
- vars:
- generated_certs_dir: "{{openshift.common.config_base}}/logging"
-
-- name: Create temp directory for all our templates
- file: path={{mktemp.stdout}}/templates state=directory mode=0755
- changed_when: False
- check_mode: no
-
-- include: generate_secrets.yaml
- vars:
- generated_certs_dir: "{{openshift.common.config_base}}/logging"
-
-- include: generate_configmaps.yaml
-
-- include: generate_services.yaml
-
-- name: Generate kibana-proxy oauth client
- template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
- vars:
- secret: "{{oauth_secret}}"
- when: oauth_secret is defined
- check_mode: no
- changed_when: no
-
-- include: generate_clusterroles.yaml
-
-- include: generate_rolebindings.yaml
-
-- include: generate_clusterrolebindings.yaml
-
-- include: generate_serviceaccounts.yaml
-
-- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c7f4a2f93..f475024dd 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- fail:
msg: Only one Fluentd nodeselector key pair should be provided
- when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+ when: openshift_logging_fluentd_nodeselector.keys() | count > 1
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
@@ -28,34 +28,14 @@
register: local_tmp
changed_when: False
check_mode: no
-
-- debug: msg="Created local temp dir {{local_tmp.stdout}}"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
- check_mode: no
- tags: logging_init
+ become: no
- include: "{{ role_path }}/tasks/install_logging.yaml"
when: openshift_logging_install_logging | default(false) | bool
-- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
- when: openshift_logging_upgrade_logging | default(false) | bool
-
- include: "{{ role_path }}/tasks/delete_logging.yaml"
when:
- not openshift_logging_install_logging | default(false) | bool
- - not openshift_logging_upgrade_logging | default(false) | bool
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- tags: logging_cleanup
- changed_when: False
- check_mode: no
- name: Cleaning up local temp dir
local_action: file path="{{local_tmp.stdout}}" state=absent
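
Note: the conditional change at the top of main.yaml reflects the general Ansible rule that when: already evaluates a raw Jinja2 expression, so wrapping it in {{ }} is redundant (newer Ansible releases warn about templating delimiters in conditionals). A minimal sketch of the preferred form next to the discouraged one:

- fail:
    msg: Only one Fluentd nodeselector key pair should be provided
  # discouraged: when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
  when: openshift_logging_fluentd_nodeselector.keys() | count > 1   # bare expression
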
diff --git a/roles/openshift_logging/tasks/oc_secret.yaml b/roles/openshift_logging/tasks/oc_secret.yaml
deleted file mode 100644
index de37e4f6d..000000000
--- a/roles/openshift_logging/tasks/oc_secret.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- secret {{subcommand}} {{service_account}} {{secret_name}}
- {{add_args}}
- -n {{openshift_logging_namespace}}
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 44dd5e894..00de0ca06 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -11,42 +11,48 @@
- name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
set_fact: procure_component_crt={{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}
- when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ when:
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
check_mode: no
- name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
set_fact: procure_component_key={{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}
- when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ when:
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
check_mode: no
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
--signer-serial={{generated_certs_dir}}/ca.serial.txt
check_mode: no
when:
- - cert_info.hostnames is defined
- - not component_key_file.stat.exists
- - not component_cert_file.stat.exists
+ - cert_info.hostnames is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
check_mode: no
when:
- - cert_info.hostnames is undefined
- - "{{ cert_info.procure_component }}_crt is defined"
- - "{{ cert_info.procure_component }}_key is defined"
- - not component_key_file.stat.exists
- - not component_cert_file.stat.exists
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
- name: Copying Server cert for {{ cert_info.procure_component }} to generated certs directory
copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
check_mode: no
when:
- - cert_info.hostnames is undefined
- - "{{ cert_info.procure_component }}_crt is defined"
- - "{{ cert_info.procure_component }}_key is defined"
- - not component_key_file.stat.exists
- - not component_cert_file.stat.exists
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml
new file mode 100644
index 000000000..056ff6b98
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_shared_key.yaml
@@ -0,0 +1,25 @@
+---
+- name: Checking for {{ shared_key_info.procure_component }}_shared_key
+ stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key"
+ register: component_shared_key_file
+ check_mode: no
+
+- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }}
+ set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }}
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ check_mode: no
+
+- name: Creating shared_key for {{ shared_key_info.procure_component }}
+ copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}"
+ dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - not component_shared_key_file.stat.exists
+
+- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ - not component_shared_key_file.stat.exists
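
Note: the new procure_shared_key.yaml expects a shared_key_info dict carrying procure_component (and optionally <component>_shared_key) plus generated_certs_dir, and relies on the random_word filter assumed to be provided by this repository's filter plugins. Its caller is not part of this hunk, so the include below is only a hypothetical sketch of how it would be wired up:

- include: procure_shared_key.yaml
  vars:
    generated_certs_dir: "{{ openshift.common.config_base }}/logging"
    shared_key_info:
      procure_component: mux          # hypothetical component name
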
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
deleted file mode 100644
index edbb62c3e..000000000
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ /dev/null
@@ -1,133 +0,0 @@
----
-- name: Retrieve list of fluentd hosts
- oc_obj:
- state: list
- kind: node
- when: "'--all' in openshift_logging_fluentd_hosts"
- register: fluentd_hosts
-
-- name: Set fact openshift_logging_fluentd_hosts
- set_fact:
- openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- when: "'--all' in openshift_logging_fluentd_hosts"
-
-- name: start fluentd
- oc_label:
- name: "{{ fluentd_host }}"
- kind: node
- state: add
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ openshift_logging_fluentd_hosts }}"
- loop_control:
- loop_var: fluentd_host
-
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve kibana
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: start kibana
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: start curator
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve elasticsearch-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=es-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve kibana-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: start kibana-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: start curator-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
deleted file mode 100644
index 4b3722e29..000000000
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ /dev/null
@@ -1,133 +0,0 @@
----
-- name: Retrieve list of fluentd hosts
- oc_obj:
- state: list
- kind: node
- when: "'--all' in openshift_logging_fluentd_hosts"
- register: fluentd_hosts
-
-- name: Set fact openshift_logging_fluentd_hosts
- set_fact:
- openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- when: "'--all' in openshift_logging_fluentd_hosts"
-
-- name: stop fluentd
- oc_label:
- name: "{{ fluentd_host }}"
- kind: node
- state: absent
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ openshift_logging_fluentd_hosts }}"
- loop_control:
- loop_var: fluentd_host
-
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: stop elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve kibana
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: stop kibana
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: stop curator
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve elasticsearch-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=es-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: stop elasticsearch-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve kibana-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: stop kibana-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: stop curator-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index cef835668..10f522b61 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.loggingPublicURL
yaml_value: "https://{{ openshift_logging_kibana_hostname }}"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
deleted file mode 100644
index 30fdbd2af..000000000
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Stop the Cluster
- include: stop_cluster.yaml
-
-- name: Upgrade logging
- include: install_logging.yaml
- vars:
- start_cluster: False
-
-# start ES so that we can run migrate script
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Wait for pods to start
- oc_obj:
- state: list
- kind: pods
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: running_pod
- until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'match', '^Running$') | map(attribute='metadata.name') | list | length != 0
- retries: 30
- delay: 10
-
-- name: Run upgrade script
- script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
- register: script_output
- changed_when:
- - script_output.rc == 0
- - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1
-
-- name: Start up rest of cluster
- include: start_cluster.yaml
diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2
deleted file mode 100644
index 0d28db48e..000000000
--- a/roles/openshift_logging/templates/clusterrole.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: v1
-kind: ClusterRole
-metadata:
- name: {{obj_name}}
-rules:
-{% for rule in rules %}
-- resources:
-{% for kind in rule.resources %}
- - {{ kind }}
-{% endfor %}
- apiGroups:
-{% if rule.api_groups is defined %}
-{% for group in rule.api_groups %}
- - {{ group }}
-{% endfor %}
-{% endif %}
- verbs:
-{% for verb in rule.verbs %}
- - {{ verb }}
-{% endfor %}
-{% endfor %}
diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial
deleted file mode 100644
index ccd01a816..000000000
--- a/roles/openshift_logging/templates/es-storage-emptydir.partial
+++ /dev/null
@@ -1 +0,0 @@
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial
deleted file mode 100644
index 07ddad9ba..000000000
--- a/roles/openshift_logging/templates/es-storage-hostpath.partial
+++ /dev/null
@@ -1,2 +0,0 @@
- hostPath:
- path: {{es_storage['path']}}
diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial
deleted file mode 100644
index fcbff68de..000000000
--- a/roles/openshift_logging/templates/es-storage-pvc.partial
+++ /dev/null
@@ -1,2 +0,0 @@
- persistentVolumeClaim:
- claimName: {{es_storage['pvc_claim']}}
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
deleted file mode 100644
index 0bf1686ad..000000000
--- a/roles/openshift_logging/templates/fluentd.j2
+++ /dev/null
@@ -1,149 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: "DaemonSet"
-metadata:
- name: "{{daemonset_name}}"
- labels:
- provider: openshift
- component: "{{daemonset_component}}"
- logging-infra: "{{daemonset_component}}"
-spec:
- selector:
- matchLabels:
- provider: openshift
- component: "{{daemonset_component}}"
- updateStrategy:
- type: RollingUpdate
- rollingUpdate:
- minReadySeconds: 600
- template:
- metadata:
- name: "{{daemonset_container_name}}"
- labels:
- logging-infra: "{{daemonset_component}}"
- provider: openshift
- component: "{{daemonset_component}}"
- spec:
- serviceAccountName: "{{daemonset_serviceAccount}}"
- nodeSelector:
- {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}"
- containers:
- - name: "{{daemonset_container_name}}"
- image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}"
- imagePullPolicy: Always
- securityContext:
- privileged: true
- resources:
- limits:
- cpu: {{openshift_logging_fluentd_cpu_limit}}
- memory: {{openshift_logging_fluentd_memory_limit}}
- volumeMounts:
- - name: runlogjournal
- mountPath: /run/log/journal
- - name: varlog
- mountPath: /var/log
- - name: varlibdockercontainers
- mountPath: /var/lib/docker/containers
- readOnly: true
- - name: config
- mountPath: /etc/fluent/configs.d/user
- readOnly: true
- - name: certs
- mountPath: /etc/fluent/keys
- readOnly: true
- - name: dockerhostname
- mountPath: /etc/docker-hostname
- readOnly: true
- - name: localtime
- mountPath: /etc/localtime
- readOnly: true
- - name: dockercfg
- mountPath: /etc/sysconfig/docker
- readOnly: true
- env:
- - name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
- - name: "ES_HOST"
- value: "{{openshift_logging_es_host}}"
- - name: "ES_PORT"
- value: "{{openshift_logging_es_port}}"
- - name: "ES_CLIENT_CERT"
- value: "{{openshift_logging_es_client_cert}}"
- - name: "ES_CLIENT_KEY"
- value: "{{openshift_logging_es_client_key}}"
- - name: "ES_CA"
- value: "{{openshift_logging_es_ca}}"
- - name: "OPS_HOST"
- value: "{{ops_host}}"
- - name: "OPS_PORT"
- value: "{{ops_port}}"
- - name: "OPS_CLIENT_CERT"
- value: "{{openshift_logging_es_ops_client_cert}}"
- - name: "OPS_CLIENT_KEY"
- value: "{{openshift_logging_es_ops_client_key}}"
- - name: "OPS_CA"
- value: "{{openshift_logging_es_ops_ca}}"
- - name: "ES_COPY"
- value: "{{openshift_logging_fluentd_es_copy|lower}}"
- - name: "ES_COPY_HOST"
- value: "{{es_copy_host | default('')}}"
- - name: "ES_COPY_PORT"
- value: "{{es_copy_port | default('')}}"
- - name: "ES_COPY_SCHEME"
- value: "{{es_copy_scheme | default('https')}}"
- - name: "ES_COPY_CLIENT_CERT"
- value: "{{es_copy_client_cert | default('')}}"
- - name: "ES_COPY_CLIENT_KEY"
- value: "{{es_copy_client_key | default('')}}"
- - name: "ES_COPY_CA"
- value: "{{es_copy_ca | default('')}}"
- - name: "ES_COPY_USERNAME"
- value: "{{es_copy_username | default('')}}"
- - name: "ES_COPY_PASSWORD"
- value: "{{es_copy_password | default('')}}"
- - name: "OPS_COPY_HOST"
- value: "{{ops_copy_host | default('')}}"
- - name: "OPS_COPY_PORT"
- value: "{{ops_copy_port | default('')}}"
- - name: "OPS_COPY_SCHEME"
- value: "{{ops_copy_scheme | default('https')}}"
- - name: "OPS_COPY_CLIENT_CERT"
- value: "{{ops_copy_client_cert | default('')}}"
- - name: "OPS_COPY_CLIENT_KEY"
- value: "{{ops_copy_client_key | default('')}}"
- - name: "OPS_COPY_CA"
- value: "{{ops_copy_ca | default('')}}"
- - name: "OPS_COPY_USERNAME"
- value: "{{ops_copy_username | default('')}}"
- - name: "OPS_COPY_PASSWORD"
- value: "{{ops_copy_password | default('')}}"
- - name: "USE_JOURNAL"
- value: "{{openshift_logging_fluentd_use_journal|lower}}"
- - name: "JOURNAL_SOURCE"
- value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- - name: "JOURNAL_READ_FROM_HEAD"
- value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
- volumes:
- - name: runlogjournal
- hostPath:
- path: /run/log/journal
- - name: varlog
- hostPath:
- path: /var/log
- - name: varlibdockercontainers
- hostPath:
- path: /var/lib/docker/containers
- - name: config
- configMap:
- name: logging-fluentd
- - name: certs
- secret:
- secretName: logging-fluentd
- - name: dockerhostname
- hostPath:
- path: /etc/hostname
- - name: localtime
- hostPath:
- path: /etc/localtime
- - name: dockercfg
- hostPath:
- path: /etc/sysconfig/docker
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
deleted file mode 100644
index e6ecf82ff..000000000
--- a/roles/openshift_logging/templates/kibana.j2
+++ /dev/null
@@ -1,116 +0,0 @@
-apiVersion: "v1"
-kind: "DeploymentConfig"
-metadata:
- name: "{{deploy_name}}"
- labels:
- provider: openshift
- component: "{{component}}"
- logging-infra: "{{logging_component}}"
-spec:
- replicas: {{replicas|default(0)}}
- selector:
- provider: openshift
- component: "{{component}}"
- logging-infra: "{{logging_component}}"
- strategy:
- rollingParams:
- intervalSeconds: 1
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- name: "{{deploy_name}}"
- labels:
- logging-infra: "{{logging_component}}"
- provider: openshift
- component: "{{component}}"
- spec:
- serviceAccountName: aggregated-logging-kibana
-{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
- nodeSelector:
-{% for key, value in kibana_node_selector.iteritems() %}
- {{key}}: "{{value}}"
-{% endfor %}
-{% endif %}
- containers:
- -
- name: "kibana"
- image: {{image}}
- imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %}
- resources:
- limits:
-{% if kibana_cpu_limit is not none %}
- cpu: "{{kibana_cpu_limit}}"
-{% endif %}
-{% if kibana_memory_limit is not none %}
- memory: "{{kibana_memory_limit}}"
-{% endif %}
-{% endif %}
- env:
- - name: "ES_HOST"
- value: "{{es_host}}"
- - name: "ES_PORT"
- value: "{{es_port}}"
- volumeMounts:
- - name: kibana
- mountPath: /etc/kibana/keys
- readOnly: true
- -
- name: "kibana-proxy"
- image: {{proxy_image}}
- imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %}
- resources:
- limits:
-{% if kibana_proxy_cpu_limit is not none %}
- cpu: "{{kibana_proxy_cpu_limit}}"
-{% endif %}
-{% if kibana_proxy_memory_limit is not none %}
- memory: "{{kibana_proxy_memory_limit}}"
-{% endif %}
-{% endif %}
- ports:
- -
- name: "oaproxy"
- containerPort: 3000
- env:
- -
- name: "OAP_BACKEND_URL"
- value: "http://localhost:5601"
- -
- name: "OAP_AUTH_MODE"
- value: "oauth2"
- -
- name: "OAP_TRANSFORM"
- value: "user_header,token_header"
- -
- name: "OAP_OAUTH_ID"
- value: kibana-proxy
- -
- name: "OAP_MASTER_URL"
- value: {{openshift_logging_master_url}}
- -
- name: "OAP_PUBLIC_MASTER_URL"
- value: {{openshift_logging_master_public_url}}
- -
- name: "OAP_LOGOUT_REDIRECT"
- value: {{openshift_logging_master_public_url}}/console/logout
- -
- name: "OAP_MASTER_CA_FILE"
- value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- -
- name: "OAP_DEBUG"
- value: "{{openshift_logging_kibana_proxy_debug}}"
- volumeMounts:
- - name: kibana-proxy
- mountPath: /secret
- readOnly: true
- volumes:
- - name: kibana
- secret:
- secretName: logging-kibana
- - name: kibana-proxy
- secret:
- secretName: logging-kibana-proxy
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
deleted file mode 100644
index eba4197da..000000000
--- a/roles/openshift_logging/templates/secret.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: "{{secret_name}}"
-type: Opaque
-data:
-{% for s in secrets %}
- "{{s.key}}" : "{{s.value | b64encode}}"
-{% endfor %}
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
deleted file mode 100644
index 6c4ec0c76..000000000
--- a/roles/openshift_logging/templates/service.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: "v1"
-kind: "Service"
-metadata:
- name: "{{obj_name}}"
-{% if labels is defined%}
- labels:
-{% for key, value in labels.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% endif %}
-spec:
- ports:
-{% for port in ports %}
- -
-{% for key, value in port.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% if port.targetPort is undefined %}
- clusterIP: "None"
-{% endif %}
-{% endfor %}
-{% if service_targetPort is defined %}
- targetPort: {{service_targetPort}}
-{% endif %}
- selector:
- {% for key, value in selector.iteritems() %}
- {{key}}: {{value}}
- {% endfor %}
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index e06625e3f..e561b41e2 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -1,12 +1,8 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-es_node_quorum: "{{openshift_logging_es_cluster_size|int/2 + 1}}"
-es_min_masters_default: "{{ (openshift_logging_es_cluster_size | int / 2 | round(0,'floor') + 1) | int }}"
-es_min_masters: "{{ (openshift_logging_es_cluster_size == 1) | ternary(1, es_min_masters_default)}}"
-es_recover_after_nodes: "{{openshift_logging_es_cluster_size|int - 1}}"
-es_recover_expected_nodes: "{{openshift_logging_es_cluster_size|int}}"
-es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size|int/2 + 1}}"
-es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size|int - 1}}"
-es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size|int}}"
+es_node_quorum: "{{ (openshift_logging_es_cluster_size | int/2 | round(0,'floor') + 1) | int}}"
+es_recover_expected_nodes: "{{openshift_logging_es_cluster_size | int}}"
+es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int/2 | round(0,'floor') + 1) | int}}"
+es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size | int}}"
es_log_appenders: ['file', 'console']
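
The refactored vars drop the es_min_masters indirection and the recover_after_nodes counts; the remaining quorum expression floors half the cluster size and adds one, so it always yields an integer majority. A minimal sketch (hypothetical task, not part of this patch) showing what the new filter chain evaluates to for a three-node cluster:

```yaml
# Illustration only: the same filter chain the vars file now uses, pinned to
# a hypothetical three-node cluster.  It reduces to floor(3 / 2) + 1 = 2,
# i.e. a strict majority of the cluster.
- debug:
    msg: "es_node_quorum={{ (openshift_logging_es_cluster_size | int/2 | round(0,'floor') + 1) | int }}"
  vars:
    openshift_logging_es_cluster_size: 3
```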
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
index 9679d209a..92e68a0a3 100644
--- a/roles/openshift_logging/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
new file mode 100644
index 000000000..82ffb2f93
--- /dev/null
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+### General logging settings
+openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local"
+
+openshift_logging_curator_namespace: logging
+
+### Common settings
+openshift_logging_curator_nodeselector: ""
+openshift_logging_curator_cpu_limit: 100m
+openshift_logging_curator_memory_limit: null
+
+openshift_logging_curator_es_host: "logging-es"
+openshift_logging_curator_es_port: 9200
+
+# This should not exceed 1; the role should eventually validate that.
+openshift_logging_curator_replicas: 1
+
+# Determines whether this is an operations (ops) deployment rather than a
+# non-ops deployment; used only for naming purposes.
+openshift_logging_curator_ops_deployment: false
+
+openshift_logging_curator_default_days: 30
+openshift_logging_curator_run_hour: 0
+openshift_logging_curator_run_minute: 0
+openshift_logging_curator_run_timezone: UTC
+openshift_logging_curator_script_log_level: INFO
+openshift_logging_curator_log_level: ERROR
+
+# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
+#curator_config_contents:
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging_curator/files/curator.yml
index 8d62d8e7d..8d62d8e7d 100644
--- a/roles/openshift_logging/files/curator.yml
+++ b/roles/openshift_logging_curator/files/curator.yml
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
new file mode 100644
index 000000000..6752fb7f9
--- /dev/null
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Curator Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_curator/tasks/determine_version.yaml b/roles/openshift_logging_curator/tasks/determine_version.yaml
new file mode 100644
index 000000000..94f8b4a97
--- /dev/null
+++ b/roles/openshift_logging_curator/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ curator_version: "{{ __latest_curator_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: curator_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Curator
+ when: curator_version not in __allowed_curator_versions
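
The regex_replace above maps an image tag onto the internal version key that __allowed_curator_versions is checked against. A hedged sketch with hypothetical tag values, using the same filter expression as the task:

```yaml
# Illustration only: how the tag-to-version mapping above behaves for a few
# hypothetical openshift_logging_image_version values.
#   'v3.6.173.0.5' -> '3_6'
#   '3.5.5.31'     -> '3_5'
#   'latest'       -> handled separately via __latest_curator_version
- set_fact: example_curator_version="{{ 'v3.6.173.0.5' | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"

- debug: var=example_curator_version  # prints "3_6"
```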
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
new file mode 100644
index 000000000..3113fb3c9
--- /dev/null
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -0,0 +1,113 @@
+---
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# service account
+- name: Create Curator service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Curator service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# configmap
+- copy:
+ src: curator.yml
+ dest: "{{ tempdir }}/curator.yml"
+ when: curator_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ curator_config_contents }}"
+ dest: "{{ tempdir }}/curator.yml"
+ when: curator_config_contents is defined
+ changed_when: no
+
+- name: Set Curator configmap
+ oc_configmap:
+ state: present
+ name: "logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ from_file:
+ config.yaml: "{{ tempdir }}/curator.yml"
+
+# secret
+- name: Set Curator secret
+ oc_secret:
+ state: present
+ name: "logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.curator.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.curator.crt"
+
+- set_fact:
+ curator_name: "{{ 'logging-curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+ curator_component: "{{ 'curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+# DC
+# TODO: scale should not exceed 1
+- name: Generate Curator deploymentconfig
+ template:
+ src: curator.j2
+ dest: "{{ tempdir }}/templates/curator-dc.yaml"
+ vars:
+ component: "{{ curator_component }}"
+ logging_component: curator
+ deploy_name: "{{ curator_name }}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: "{{ openshift_logging_curator_es_host }}"
+ es_port: "{{ openshift_logging_curator_es_port }}"
+ curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
+ curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
+ curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: Set Curator DC
+ oc_obj:
+ state: present
+ name: "{{ curator_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/curator-dc.yaml"
+ delete_after: true
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
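
The task file above expects a handful of facts from its caller (namespace, generated_certs_dir, image settings). A hedged sketch of how a playbook might pull the new role in for an ops Curator instance; the variable values shown are illustrative, not defaults:

```yaml
# Hypothetical caller for the new role; paths and values are examples only.
- include_role:
    name: openshift_logging_curator
  vars:
    generated_certs_dir: "/etc/origin/logging"
    openshift_logging_namespace: logging
    openshift_logging_image_prefix: "docker.io/openshift/origin-"
    openshift_logging_image_version: v3.6
    openshift_logging_image_pull_secret: ""
    openshift_logging_curator_ops_deployment: true  # names objects logging-curator-ops / curator-ops
```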
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index a0fefd882..6431f86d9 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{curator_replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -42,13 +42,13 @@ spec:
resources:
limits:
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
+ value: "{{openshift_logging_curator_master_url}}"
-
name: "ES_HOST"
value: "{{es_host}}"
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
new file mode 100644
index 000000000..97525479e
--- /dev/null
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_curator_version: "3_5"
+__allowed_curator_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
new file mode 100644
index 000000000..c0b5d394e
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -0,0 +1,57 @@
+---
+### Common settings
+openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_elasticsearch_namespace: logging
+
+openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
+openshift_logging_elasticsearch_cpu_limit: 1000m
+openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
+openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
+
+openshift_logging_elasticsearch_replica_count: 1
+
+# ES deployment type
+openshift_logging_elasticsearch_deployment_type: "data-master"
+
+# ES deployment name
+openshift_logging_elasticsearch_deployment_name: ""
+
+# One of ['emptydir', 'pvc', 'hostmount']
+openshift_logging_elasticsearch_storage_type: "emptydir"
+
+# hostmount options
+openshift_logging_elasticsearch_hostmount_path: ""
+
+# pvc options
+# the name of the PVC we will bind to -- create it if it does not exist
+openshift_logging_elasticsearch_pvc_name: ""
+
+# required if the PVC does not already exist
+openshift_logging_elasticsearch_pvc_size: ""
+openshift_logging_elasticsearch_pvc_dynamic: false
+openshift_logging_elasticsearch_pvc_pv_selector: {}
+openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
+openshift_logging_elasticsearch_storage_group: '65534'
+
+openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+
+# Determines whether this is an operations (ops) deployment rather than a
+# non-ops deployment; used only for naming purposes.
+openshift_logging_elasticsearch_ops_deployment: false
+
+openshift_logging_elasticsearch_ops_allow_cluster_reader: false
+
+# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
+#es_logging_contents:
+#es_config_contents:
+
+
+openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
index 339b5a1b2..339b5a1b2 100644
--- a/roles/openshift_logging/files/es_migration.sh
+++ b/roles/openshift_logging_elasticsearch/files/es_migration.sh
diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
new file mode 100644
index 000000000..567c9f289
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: rolebinding-reader
+rules:
+- resources:
+ - clusterrolebindings
+ verbs:
+ - get
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
new file mode 100644
index 000000000..097270772
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Elasticsearch Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
new file mode 100644
index 000000000..1a952b5cf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -0,0 +1,19 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ es_version: "{{ __latest_es_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+- debug: var=openshift_logging_image_version
+
+# should we just assume that we will have the correct major version?
+- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Elasticsearch
+ when: es_version not in __allowed_es_versions
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
new file mode 100644
index 000000000..0548e3c40
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -0,0 +1,347 @@
+---
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
+
+- name: Validate Elasticsearch Ops cluster size
+ fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
+
+- fail:
+ msg: Invalid deployment type, one of ['data-master', 'data-client', 'master', 'client'] allowed
+ when: not openshift_logging_elasticsearch_deployment_type in __allowed_es_types
+
+- set_fact:
+ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# service account
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# rolebinding reader
+- copy:
+ src: rolebinding-reader.yml
+ dest: "{{ tempdir }}/rolebinding-reader.yml"
+
+- name: Create rolebinding-reader role
+ oc_obj:
+ state: present
+ name: "rolebinding-reader"
+ kind: clusterrole
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/rolebinding-reader.yml"
+ delete_after: true
+
+# SA roles
+- name: Set rolebinding-reader permissions for ES
+ oc_adm_policy_user:
+ state: present
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ resource_kind: cluster-role
+ resource_name: rolebinding-reader
+ user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch"
+
+# View role and binding
+- name: Generate logging-elasticsearch-view-role
+ template:
+ src: rolebinding.j2
+ dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml"
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ changed_when: no
+
+- name: Set logging-elasticsearch-view-role role
+ oc_obj:
+ state: present
+ name: "logging-elasticsearch-view-role"
+ kind: rolebinding
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ delete_after: true
+
+# configmap
+- template:
+ src: elasticsearch-logging.yml.j2
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+- template:
+ src: elasticsearch.yml.j2
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ vars:
+ allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ es_logging_contents }}"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ es_config_contents }}"
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+- name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+
+
+# secret
+- name: Set ES secret
+ oc_secret:
+ state: present
+ name: "logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+
+# services
+- name: Set logging-{{ es_component }}-cluster service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}-cluster"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9300
+
+- name: Set logging-{{ es_component }} service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+
+# Storage classes are used by default; for static provisioning, disable
+# them by setting storageClassName to "" in pvc.j2
+- name: Creating ES storage template - static
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+# Storageclasses are used by default if configured
+- name: Creating ES storage template - dynamic
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+- name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+
+- set_fact:
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
+ when: openshift_logging_elasticsearch_deployment_name == ""
+
+- set_fact:
+ es_deploy_name: "{{ openshift_logging_elasticsearch_deployment_name }}"
+ when: openshift_logging_elasticsearch_deployment_name != ""
+
+# DC
+- name: Set ES dc templates
+ template:
+ src: es.j2
+ dest: "{{ tempdir }}/templates/logging-es-dc.yml"
+ vars:
+ es_cluster_name: "{{ es_component }}"
+ component: "{{ es_component }}"
+ logging_component: elasticsearch
+ deploy_name: "{{ es_deploy_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
+ es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
+ deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
+ es_replicas: 1
+
+- name: Set ES dc
+ oc_obj:
+ state: present
+ name: "{{ es_deploy_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
+ delete_after: true
+
+- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
+ slurp:
+ src: "{{ generated_certs_dir }}/{{ item.file }}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
+ when: openshift_logging_es_allow_external | bool
+
+- set_fact:
+ es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
+ when:
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- set_fact:
+ es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
+ when:
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- set_fact:
+ es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
+ when:
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- set_fact:
+ es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
+ when:
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- name: Generating Elasticsearch {{ es_component }} route template
+ template:
+ src: route_reencrypt.j2
+ dest: "{{mktemp.stdout}}/templates/logging-{{ es_component }}-route.yaml"
+ vars:
+ obj_name: "logging-{{ es_component }}"
+ route_host: "{{ openshift_logging_es_hostname }}"
+ service_name: "logging-{{ es_component }}"
+ tls_key: "{{ es_key | default('') | b64decode }}"
+ tls_cert: "{{ es_cert | default('') | b64decode }}"
+ tls_ca_cert: "{{ es_ca | b64decode }}"
+ tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"
+ edge_term_policy: "{{ openshift_logging_es_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ changed_when: no
+ when: openshift_logging_es_allow_external | bool
+
+# This currently has an issue if the host name changes
+- name: Setting Elasticsearch {{ es_component }} route
+ oc_obj:
+ state: present
+ name: "logging-{{ es_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: route
+ files:
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ when: openshift_logging_es_allow_external | bool
+
+## Placeholder for migration when necessary ##
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
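
The storage tasks above only render and create a PVC when openshift_logging_elasticsearch_storage_type is 'pvc'; with the default 'emptydir' nothing persistent is provisioned. A hedged sketch of the variables a static (non-dynamic) PVC deployment would set — the names come from the role defaults above, the values are illustrative:

```yaml
# Illustrative values only -- variable names come from the new role defaults.
openshift_logging_elasticsearch_storage_type: pvc
openshift_logging_elasticsearch_pvc_name: logging-es-0
openshift_logging_elasticsearch_pvc_size: 10Gi
openshift_logging_elasticsearch_pvc_dynamic: false
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_elasticsearch_pvc_pv_selector: {}
openshift_logging_elasticsearch_storage_group: '65534'
```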
diff --git a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
index 499e77fb7..38948ba2f 100644
--- a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
@@ -1,25 +1,14 @@
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
-rootLogger: ${es.logger.level}, {{root_logger}}
+rootLogger: ${es.logger.level}, console, file
logger:
# log action execution errors for easier debugging
action: WARN
-
- # deprecation logging, turn to DEBUG to see them
- deprecation: WARN, deprecation_log_file
-
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
-
io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
- # aws will try to do some sketchy JMX stuff, but its not needed.
- com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
- com.amazonaws.metrics.AwsSdkMetrics: ERROR
-
- org.apache.http: INFO
-
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
@@ -39,14 +28,19 @@ logger:
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
- deprecation: false
appender:
console:
type: console
layout:
type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+ # need this filter until https://github.com/openshift/origin/issues/14515 is fixed
+ filter:
+ 1:
+ type: org.apache.log4j.varia.StringMatchFilter
+ StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1"
+ AcceptOnMatch: false
file:
type: dailyRollingFile
@@ -55,14 +49,23 @@ appender:
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
- deprecation_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_deprecation.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+ # need this filter until https://github.com/openshift/origin/issues/14515 is fixed
+ filter:
+ 1:
+ type: org.apache.log4j.varia.StringMatchFilter
+ StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1"
+ AcceptOnMatch: false
+
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 93c4d854c..141967c33 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -14,8 +14,10 @@ index:
flush_threshold_period: 5m
node:
- master: true
- data: true
+ name: ${DC_NAME}
+ master: ${IS_MASTER}
+ data: ${HAS_DATA}
+ max_local_storage_nodes: 1
network:
host: 0.0.0.0
@@ -28,17 +30,17 @@ cloud:
discovery:
type: kubernetes
zen.ping.multicast.enabled: false
- zen.minimum_master_nodes: {{es_min_masters}}
+ zen.minimum_master_nodes: ${NODE_QUORUM}
gateway:
- expected_master_nodes: ${NODE_QUORUM}
- recover_after_nodes: ${RECOVER_AFTER_NODES}
+ recover_after_nodes: ${NODE_QUORUM}
expected_nodes: ${RECOVER_EXPECTED_NODES}
recover_after_time: ${RECOVER_AFTER_TIME}
io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
openshift.config:
use_common_data_model: true
@@ -60,7 +62,7 @@ path:
searchguard:
authcz.admin_dn:
- CN=system.admin,OU=OpenShift,O=Logging
- config_index_name: ".searchguard.${HOSTNAME}"
+ config_index_name: ".searchguard.${DC_NAME}"
ssl:
transport:
enabled: true
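
The configuration now defers quorum and recovery values to environment variables (NODE_QUORUM, RECOVER_EXPECTED_NODES, RECOVER_AFTER_TIME) injected by the deploymentconfig instead of baking them in at template time. A hedged sketch of the discovery/gateway section as the container would see it for a hypothetical three-node cluster with a 5m recovery window:

```yaml
# Hypothetical post-substitution view; the real values come from the DC env.
discovery:
  type: kubernetes
  zen.ping.multicast.enabled: false
  zen.minimum_master_nodes: 2      # ${NODE_QUORUM}
gateway:
  recover_after_nodes: 2           # ${NODE_QUORUM}
  expected_nodes: 3                # ${RECOVER_EXPECTED_NODES}
  recover_after_time: 5m           # ${RECOVER_AFTER_TIME}
```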
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 16185fc1d..cbe6b89f2 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -8,7 +8,7 @@ metadata:
deployment: "{{deploy_name}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{es_replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -29,7 +29,7 @@ spec:
serviceAccountName: aggregated-logging-elasticsearch
securityContext:
supplementalGroups:
- - {{openshift_logging_es_storage_group}}
+ - {{openshift_logging_elasticsearch_storage_group}}
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
{% for key, value in es_node_selector.iteritems() %}
@@ -58,6 +58,9 @@ spec:
name: "cluster"
env:
-
+ name: "DC_NAME"
+ value: "{{deploy_name}}"
+ -
name: "NAMESPACE"
valueFrom:
fieldRef:
@@ -73,19 +76,27 @@ spec:
value: "logging-{{es_cluster_name}}"
-
name: "INSTANCE_RAM"
- value: "{{openshift_logging_es_memory_limit}}"
+ value: "{{openshift_logging_elasticsearch_memory_limit}}"
+ -
+ name: "HEAP_DUMP_LOCATION"
+ value: "/elasticsearch/persistent/heapdump.hprof"
-
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
-
- name: "RECOVER_AFTER_NODES"
- value: "{{es_recover_after_nodes}}"
- -
name: "RECOVER_EXPECTED_NODES"
value: "{{es_recover_expected_nodes}}"
-
name: "RECOVER_AFTER_TIME"
- value: "{{openshift_logging_es_recover_after_time}}"
+ value: "{{openshift_logging_elasticsearch_recover_after_time}}"
+ -
+ name: "IS_MASTER"
+ value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
+
+ -
+ name: "HAS_DATA"
+ value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+
volumeMounts:
- name: elasticsearch
mountPath: /etc/elasticsearch/secret
@@ -103,4 +114,12 @@ spec:
configMap:
name: logging-elasticsearch
- name: elasticsearch-storage
-{% include 'es-storage-'+ es_storage['kind'] + '.partial' %}
+{% if openshift_logging_elasticsearch_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_logging_elasticsearch_pvc_name }}
+{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %}
+ hostPath:
+ path: {{ openshift_logging_elasticsearch_hostmount_path }}
+{% else %}
+ emptyDir: {}
+{% endif %}
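
With the es-storage-*.partial includes gone, the storage branch above renders directly from the role variables. A hedged sketch of the volume stanza the template would emit when the storage type is 'pvc' and the claim is named logging-es-0 (illustrative name):

```yaml
# Hypothetical rendering of the branch above for storage_type == 'pvc':
- name: elasticsearch-storage
  persistentVolumeClaim:
    claimName: logging-es-0
```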
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
new file mode 100644
index 000000000..063f9c5ae
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+ labels:
+ logging-infra: support
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
index fcd4e87cc..fcd4e87cc 100644
--- a/roles/openshift_logging/templates/rolebinding.j2
+++ b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
index cf8a9e65f..cf8a9e65f 100644
--- a/roles/openshift_logging/templates/route_reencrypt.j2
+++ b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
new file mode 100644
index 000000000..7a1f5048b
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -0,0 +1,12 @@
+---
+__latest_es_version: "3_5"
+__allowed_es_versions: ["3_5", "3_6"]
+__allowed_es_types: ["data-master", "data-client", "master", "client"]
+
+# TODO: integrate these
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
+es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int / 2 | round(0,'floor') + 1) | int }}"
+es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
+es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
new file mode 100644
index 000000000..ce7cfc433
--- /dev/null
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -0,0 +1,59 @@
+---
+### General logging settings
+openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_fluentd_namespace: logging
+
+### Common settings
+openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_hosts: ['--all']
+
+# float time in seconds to wait between node labelling
+openshift_logging_fluentd_label_delay: '0.5'
+
+# Fluentd deployment type
+openshift_logging_fluentd_deployment_type: "hosted"
+
+### Used by "hosted" and "secure-host" deployments
+
+# Destination for the application based logs
+openshift_logging_fluentd_app_host: "logging-es"
+openshift_logging_fluentd_app_port: 9200
+# Destination for the operations based logs
+openshift_logging_fluentd_ops_host: "{{ openshift_logging_fluentd_app_host }}"
+openshift_logging_fluentd_ops_port: "{{ openshift_logging_fluentd_app_port }}"
+
+### Used by "hosted" and "secure-aggregator" deployments
+openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+
+openshift_logging_fluentd_app_client_cert: /etc/fluent/keys/cert
+openshift_logging_fluentd_app_client_key: /etc/fluent/keys/key
+openshift_logging_fluentd_app_ca: /etc/fluent/keys/ca
+openshift_logging_fluentd_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_fluentd_ops_client_key: /etc/fluent/keys/key
+openshift_logging_fluentd_ops_ca: /etc/fluent/keys/ca
+
+
+# used by "secure-host" and "secure-aggregator" deployments
+openshift_logging_fluentd_shared_key: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(128) }}"
+openshift_logging_fluentd_aggregating_port: 24284
+openshift_logging_fluentd_aggregating_host: "${HOSTNAME}"
+openshift_logging_fluentd_aggregating_secure: "no"
+openshift_logging_fluentd_aggregating_strict: "no"
+openshift_logging_fluentd_aggregating_cert_path: none
+openshift_logging_fluentd_aggregating_key_path: none
+openshift_logging_fluentd_aggregating_passphrase: none
+openshift_logging_use_mux_client: False
+
+### Deprecating in 3.6
+openshift_logging_fluentd_es_copy: false
+
+# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
+#fluentd_config_contents:
+#fluentd_throttle_contents:
+#fluentd_secureforward_contents:
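
The default node selector is supplied as a single 'key=value' string and converted to a dict with the repo's map_from_pairs filter (used in the default above). A hedged sketch (hypothetical task) of what the default resolves to:

```yaml
# Illustration only: the default above resolves to a one-entry dict.
#   'logging-infra-fluentd=true' | map_from_pairs -> {'logging-infra-fluentd': 'true'}
- debug:
    msg: "{{ 'logging-infra-fluentd=true' | map_from_pairs }}"
```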
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
index 375621ff1..375621ff1 100644
--- a/roles/openshift_logging/files/fluentd-throttle-config.yaml
+++ b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..f4483df79 100644
--- a/roles/openshift_logging/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
new file mode 100644
index 000000000..2003aacb2
--- /dev/null
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Fluentd Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_fluentd/tasks/determine_version.yaml b/roles/openshift_logging_fluentd/tasks/determine_version.yaml
new file mode 100644
index 000000000..a1ba71b1b
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ fluentd_version: "{{ __latest_fluentd_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: fluentd_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Fluentd
+ when: fluentd_version not in __allowed_fluentd_versions
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
new file mode 100644
index 000000000..e92a35f27
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -0,0 +1,10 @@
+---
+- name: Label {{ node }} for Fluentd deployment
+ oc_label:
+ name: "{{ node }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+
+# wait half a second between labels
+- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
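
label_and_wait.yaml labels a single node and then pauses, so it is meant to be included once per host with a small delay between iterations. A hedged sketch of a driving loop; the loop variable and host list shown are illustrative:

```yaml
# Hypothetical driver for the per-node include above.
- include: label_and_wait.yaml
  vars:
    node: "{{ fluentd_host }}"
  with_items: "{{ openshift_logging_fluentd_hosts }}"
  loop_control:
    loop_var: fluentd_host
```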
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
new file mode 100644
index 000000000..55de2ae8d
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -0,0 +1,202 @@
+---
+- fail:
+ msg: Only one Fluentd nodeselector key pair should be provided
+ when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+
+- fail:
+ msg: Application logs destination is required
+ when: not openshift_logging_fluentd_app_host or openshift_logging_fluentd_app_host == ''
+
+- fail:
+ msg: Operations logs destination is required
+ when: not openshift_logging_fluentd_ops_host or openshift_logging_fluentd_ops_host == ''
+
+- fail:
+ msg: Invalid deployment type; must be one of ['hosted', 'secure-aggregator', 'secure-host']
+ when: not openshift_logging_fluentd_deployment_type in __allowed_fluentd_types
+
+- debug:
+ msg: openshift_logging_fluentd_use_journal is deprecated. Fluentd will automatically detect which logging driver is being used.
+ when: openshift_logging_fluentd_use_journal is defined
+
+- debug:
+ msg: openshift_hosted_logging_use_journal is deprecated. Fluentd will automatically detect which logging driver is being used.
+ when: openshift_hosted_logging_use_journal is defined
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Fluentd service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Fluentd service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# set service account scc
+- name: Set privileged permissions for Fluentd
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Fluentd
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd"
+
+# create Fluentd configmap
+- template:
+ src: fluent.conf.j2
+ dest: "{{ tempdir }}/fluent.conf"
+ vars:
+ deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
+ when: fluentd_config_contents is undefined
+ changed_when: no
+
+- copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+ changed_when: no
+
+- copy:
+ src: secure-forward.conf
+ dest: "{{ tempdir }}/secure-forward.conf"
+ when: fluentd_secureforward_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_config_contents }}"
+ dest: "{{ tempdir }}/fluent.conf"
+ when: fluentd_config_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_throttle_contents }}"
+ dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_secureforward_contents }}"
+ dest: "{{ tempdir }}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+ changed_when: no
+
+- name: Set Fluentd configmap
+ oc_configmap:
+ state: present
+ name: "logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ from_file:
+ fluent.conf: "{{ tempdir }}/fluent.conf"
+ throttle-config.yaml: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ secure-forward.conf: "{{ tempdir }}/secure-forward.conf"
+
+# create Fluentd secret
+# TODO: add aggregation secrets if necessary
+- name: Set logging-fluentd secret
+ oc_secret:
+ state: present
+ name: logging-fluentd
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.fluentd.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
+
+# create Fluentd daemonset
+
+# this should change based on the type of fluentd deployment to be done...
+# TODO: pass in aggregation configurations
+- name: Generate logging-fluentd daemonset definition
+ template:
+ src: fluentd.j2
+ dest: "{{ tempdir }}/templates/logging-fluentd.yaml"
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ app_host: "{{ openshift_logging_fluentd_app_host }}"
+ app_port: "{{ openshift_logging_fluentd_app_port }}"
+ ops_host: "{{ openshift_logging_fluentd_ops_host }}"
+ ops_port: "{{ openshift_logging_fluentd_ops_port }}"
+ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
+ fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ check_mode: no
+ changed_when: no
+
+- name: Set logging-fluentd daemonset
+ oc_obj:
+ state: present
+ name: logging-fluentd
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ kind: daemonset
+ files:
+ - "{{ tempdir }}/templates/logging-fluentd.yaml"
+ delete_after: true
+
+# Scale up Fluentd
+- name: Retrieve list of Fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ register: fluentd_hosts
+
+- name: Set openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ when: "'--all' in openshift_logging_fluentd_hosts"
+
+- include: label_and_wait.yaml
+ vars:
+ node: "{{ fluentd_host }}"
+ with_items: "{{ openshift_logging_fluentd_hosts }}"
+ loop_control:
+ loop_var: fluentd_host
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
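Taken together, the tasks above validate the inputs, create the aggregated-logging-fluentd service account with the privileged SCC and cluster-reader role, build the logging-fluentd configmap and secret, create the daemonset, and finally label the selected nodes. A minimal sketch of inventory variables that exercise the role; the nodeselector value is hypothetical, while 'hosted' and '--all' are values the tasks above explicitly handle:

    openshift_logging_fluentd_deployment_type: hosted
    openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
    openshift_logging_fluentd_hosts: ['--all']   # expanded to every node by the oc_obj node listing above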
diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
new file mode 100644
index 000000000..46de94d60
--- /dev/null
+++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
@@ -0,0 +1,78 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+{% if deploy_type in ['hosted', 'secure-aggregator'] %}
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+{% else %}
+<source>
+ @type secure_forward
+ @label @INGRESS
+
+ self_hostname ${HOSTNAME}
+ bind 0.0.0.0
+ port {{openshift_logging_fluentd_aggregating_port}}
+
+ shared_key {{openshift_logging_fluentd_shared_key}}
+
+ secure {{openshift_logging_fluentd_aggregating_secure}}
+ enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+ ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}}
+ ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+ ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+ <client>
+ host {{openshift_logging_fluentd_aggregating_host}}
+ </client>
+</source>
+{% endif %}
+
+<label @INGRESS>
+{% if deploy_type in ['hosted', 'secure-host'] %}
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-viaq-data-model.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+{% else %}
+ <match **>
+ @type secure_forward
+
+ self_hostname ${HOSTNAME}
+ shared_key {{openshift_logging_fluentd_shared_key}}
+
+ secure {{openshift_logging_fluentd_aggregating_secure}}
+ enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+ ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}}
+ ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+ ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+ <server>
+ host {{openshift_logging_fluentd_aggregating_host}}
+ port {{openshift_logging_fluentd_aggregating_port}}
+ </server>
+ </match>
+{% endif %}
+</label>
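The template branches on deploy_type: 'hosted' renders the local docker/syslog inputs plus the filter and output includes; 'secure-aggregator' keeps the local inputs but forwards everything through the secure_forward match; 'secure-host' receives records on a secure_forward source and runs the normal filters and outputs. A sketch of the rendered secure-aggregator match, where enable_strict_verification and the CA paths come from the defaults shown earlier in this patch and the secure flag, shared key, host and port are placeholders:

    <match **>
      @type secure_forward

      self_hostname ${HOSTNAME}
      shared_key example-shared-key

      secure no
      enable_strict_verification no
      ca_cert_path none
      ca_private_key_path none
      ca_private_key_passphrase none

      <server>
        host mux.example.com
        port 24284
      </server>
    </match>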
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
new file mode 100644
index 000000000..970e5c2a5
--- /dev/null
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -0,0 +1,147 @@
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+ name: "{{ daemonset_name }}"
+ labels:
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ logging-infra: "{{ daemonset_component }}"
+spec:
+ selector:
+ matchLabels:
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ minReadySeconds: 600
+ template:
+ metadata:
+ name: "{{ daemonset_container_name }}"
+ labels:
+ logging-infra: "{{ daemonset_component }}"
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ spec:
+ serviceAccountName: "{{ daemonset_serviceAccount }}"
+ nodeSelector:
+ {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"
+ containers:
+ - name: "{{ daemonset_container_name }}"
+ image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}"
+ imagePullPolicy: Always
+ securityContext:
+ privileged: true
+ resources:
+ limits:
+ cpu: {{ openshift_logging_fluentd_cpu_limit }}
+ memory: {{ openshift_logging_fluentd_memory_limit }}
+ volumeMounts:
+ - name: runlogjournal
+ mountPath: /run/log/journal
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: dockercfg
+ mountPath: /etc/sysconfig/docker
+ readOnly: true
+ - name: dockerdaemoncfg
+ mountPath: /etc/docker
+ readOnly: true
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+{% endif %}
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{ openshift_logging_fluentd_master_url }}"
+ - name: "ES_HOST"
+ value: "{{ app_host }}"
+ - name: "ES_PORT"
+ value: "{{ app_port }}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{ openshift_logging_fluentd_app_client_cert }}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{ openshift_logging_fluentd_app_client_key }}"
+ - name: "ES_CA"
+ value: "{{ openshift_logging_fluentd_app_ca }}"
+ - name: "OPS_HOST"
+ value: "{{ ops_host }}"
+ - name: "OPS_PORT"
+ value: "{{ ops_port }}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{ openshift_logging_fluentd_ops_client_cert }}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{ openshift_logging_fluentd_ops_client_key }}"
+ - name: "OPS_CA"
+ value: "{{ openshift_logging_fluentd_ops_ca }}"
+ - name: "ES_COPY"
+ value: "false"
+ - name: "JOURNAL_SOURCE"
+ value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}"
+ - name: "BUFFER_QUEUE_LIMIT"
+ value: "{{ openshift_logging_fluentd_buffer_queue_limit }}"
+ - name: "BUFFER_SIZE_LIMIT"
+ value: "{{ openshift_logging_fluentd_buffer_size_limit }}"
+ - name: "FLUENTD_CPU_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "{{ daemonset_container_name }}"
+ resource: limits.cpu
+ - name: "FLUENTD_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "{{ daemonset_container_name }}"
+ resource: limits.memory
+ - name: "USE_MUX_CLIENT"
+ value: "{{ openshift_logging_use_mux_client | default('false') | lower }}"
+ volumes:
+ - name: runlogjournal
+ hostPath:
+ path: /run/log/journal
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: config
+ configMap:
+ name: logging-fluentd
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: dockercfg
+ hostPath:
+ path: /etc/sysconfig/docker
+ - name: dockerdaemoncfg
+ hostPath:
+ path: /etc/docker
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
+{% endif %}
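The only conditional pieces of the daemonset are the mux-client parts: enabling openshift_logging_use_mux_client adds the muxcerts volume and mount backed by the logging-mux secret (created by the openshift_logging_mux role later in this patch), and the flag is passed to the container through USE_MUX_CLIENT. A one-line inventory sketch:

    openshift_logging_use_mux_client: True   # the logging-mux secret must exist in the namespace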
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
new file mode 100644
index 000000000..ad3fb0bdd
--- /dev/null
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -0,0 +1,4 @@
+---
+__latest_fluentd_version: "3_5"
+__allowed_fluentd_versions: ["3_5", "3_6"]
+__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
new file mode 100644
index 000000000..b2556fd71
--- /dev/null
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+### Common settings
+openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_kibana_namespace: logging
+
+openshift_logging_kibana_nodeselector: ""
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
+
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+
+openshift_logging_kibana_es_host: "logging-es"
+openshift_logging_kibana_es_port: 9200
+
+openshift_logging_kibana_replicas: 1
+openshift_logging_kibana_edge_term_policy: Redirect
+
+# determines whether this is an operations (ops) deployment or a non-ops deployment;
+# used only for naming purposes
+openshift_logging_kibana_ops_deployment: false
+
+# Proxy settings
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
+
+# The absolute path on the control node to the cert file to use
+# for the public-facing Kibana certs
+openshift_logging_kibana_cert: ""
+
+# The absolute path on the control node to the key file to use
+# for the public-facing Kibana certs
+openshift_logging_kibana_key: ""
+
+# The absolute path on the control node to the CA file to use
+# for the public-facing Kibana certs
+openshift_logging_kibana_ca: ""
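The cert, key and CA paths are read on the control node with lookup('file', ...) in tasks/main.yaml and embedded into the re-encrypt route; when they are left empty, the route's CA falls back to the one in generated_certs_dir. A sketch for custom public-facing certificates; all paths and hostnames below are hypothetical:

    openshift_logging_kibana_hostname: kibana.apps.example.com
    openshift_logging_kibana_cert: /root/certs/kibana.apps.example.com.crt
    openshift_logging_kibana_key: /root/certs/kibana.apps.example.com.key
    openshift_logging_kibana_ca: /root/certs/example-ca.crt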
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
new file mode 100644
index 000000000..89e08abc0
--- /dev/null
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Kibana Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_kibana/tasks/determine_version.yaml b/roles/openshift_logging_kibana/tasks/determine_version.yaml
new file mode 100644
index 000000000..53e15af5f
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install; provide one via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ kibana_version: "{{ __latest_kibana_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: kibana_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Kibana
+ when: kibana_version not in __allowed_kibana_versions
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
new file mode 100644
index 000000000..62bc26e37
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -0,0 +1,255 @@
+---
+# fail if we don't have an endpoint for ES to connect to?
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Kibana service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Kibana service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+- set_fact:
+ kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ kibana_component: "{{ 'kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+
+# Check {{ generated_certs_dir }} for session_secret and oauth_secret
+- name: Checking for session_secret
+ stat: path="{{generated_certs_dir}}/session_secret"
+ register: session_secret_file
+
+- name: Checking for oauth_secret
+ stat: path="{{generated_certs_dir}}/oauth_secret"
+ register: oauth_secret_file
+
+# gen session_secret if necessary
+- name: Generate session secret
+ copy:
+ content: "{{ 200 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/session_secret"
+ when:
+ - not session_secret_file.stat.exists
+
+# gen oauth_secret if necessary
+- name: Generate oauth secret
+ copy:
+ content: "{{ 64 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/oauth_secret"
+ when:
+ - not oauth_secret_file.stat.exists
+
+- name: Retrieving the cert to use when generating secrets for the logging components
+ slurp:
+ src: "{{ generated_certs_dir }}/{{ item.file }}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+ - { name: "session_secret", file: "session_secret" }
+ - { name: "oauth_secret", file: "oauth_secret" }
+
+# services
+- name: Set {{ kibana_name }} service
+ oc_service:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_kibana_namespace }}"
+ selector:
+ component: "{{ kibana_component }}"
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 443
+ targetPort: "oaproxy"
+
+# create routes
+# TODO: set up these certs differently?
+- set_fact:
+ kibana_key: "{{ lookup('file', openshift_logging_kibana_key) | b64encode }}"
+ when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_cert: "{{ lookup('file', openshift_logging_kibana_cert) | b64encode }}"
+ when: "{{ openshift_logging_kibana_cert | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_ca: "{{ lookup('file', openshift_logging_kibana_ca) | b64encode }}"
+ when: "{{ openshift_logging_kibana_ca | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
+ when: kibana_ca is not defined
+ changed_when: false
+
+- name: Generating Kibana route template
+ template:
+ src: route_reencrypt.j2
+ dest: "{{ tempdir }}/templates/kibana-route.yaml"
+ vars:
+ obj_name: "{{ kibana_name }}"
+ route_host: "{{ openshift_logging_kibana_hostname }}"
+ service_name: "{{ kibana_name }}"
+ tls_key: "{{ kibana_key | default('') | b64decode }}"
+ tls_cert: "{{ kibana_cert | default('') | b64decode }}"
+ tls_ca_cert: "{{ kibana_ca | b64decode }}"
+ tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"
+ edge_term_policy: "{{ openshift_logging_kibana_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ changed_when: no
+
+# This currently has an issue if the host name changes
+- name: Setting Kibana route
+ oc_obj:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: route
+ files:
+ - "{{ tempdir }}/templates/kibana-route.yaml"
+
+# preserve list of current hostnames
+- name: Get current oauthclient hostnames
+ oc_obj:
+ state: list
+ name: kibana-proxy
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ register: oauth_client_list
+
+- set_fact: proxy_hostnames={{ oauth_client_list.results.results[0].redirectURIs | default([]) + ['https://' ~ openshift_logging_kibana_hostname] }}
+
+# create oauth client
+- name: Create oauth-client template
+ template:
+ src: oauth-client.j2
+ dest: "{{ tempdir }}/templates/oauth-client.yml"
+ vars:
+ kibana_hostnames: "{{ proxy_hostnames | unique }}"
+ secret: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
+
+- name: Set kibana-proxy oauth-client
+ oc_obj:
+ state: present
+ name: "kibana-proxy"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ files:
+ - "{{ tempdir }}/templates/oauth-client.yml"
+ delete_after: true
+
+# create Kibana secret
+- name: Set Kibana secret
+ oc_secret:
+ state: present
+ name: "logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.kibana.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.kibana.crt"
+
+# create Kibana-proxy secret
+- name: Set Kibana Proxy secret
+ oc_secret:
+ state: present
+ name: "logging-kibana-proxy"
+ namespace: "{{ openshift_logging_namespace }}"
+ # TODO: when possible to have both files and contents for oc_secret use this
+ #files:
+ #- name: server-key
+ # path: "{{ generated_certs_dir }}/kibana-internal.key"
+ #- name: server-cert
+ # path: "{{ generated_certs_dir }}/kibana-internal.crt"
+ #- name: server-tls.json
+ # path: "{{ generated_certs_dir }}/server-tls.json"
+ contents:
+ - path: oauth-secret
+ data: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
+ - path: session-secret
+ data: "{{ key_pairs | entry_from_named_pair('session_secret') | b64decode }}"
+ - path: server-key
+ data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}"
+ - path: server-cert
+ data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}"
+ - path: server-tls.json
+ data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}"
+
+# create Kibana DC
+- name: Generate Kibana DC template
+ template:
+ src: kibana.j2
+ dest: "{{ tempdir }}/templates/kibana-dc.yaml"
+ vars:
+ component: "{{ kibana_component }}"
+ logging_component: kibana
+ deploy_name: "{{ kibana_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}"
+ proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}"
+ es_host: "{{ openshift_logging_kibana_es_host }}"
+ es_port: "{{ openshift_logging_kibana_es_port }}"
+ kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
+ kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
+ kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+
+- name: Set Kibana DC
+ oc_obj:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/kibana-dc.yaml"
+ delete_after: true
+
+# update master configs?
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
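The ops variant of Kibana reuses this same task file: openshift_logging_kibana_ops_deployment only changes the names computed by the set_fact near the top, so the role can be applied twice against different Elasticsearch endpoints. The resulting naming, for reference:

    # openshift_logging_kibana_ops_deployment: false  ->  kibana_name: logging-kibana      kibana_component: kibana
    # openshift_logging_kibana_ops_deployment: true   ->  kibana_name: logging-kibana-ops  kibana_component: kibana-ops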
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
new file mode 100644
index 000000000..512d99d06
--- /dev/null
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -0,0 +1,150 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{ deploy_name }}"
+ labels:
+ provider: openshift
+ component: "{{ component }}"
+ logging-infra: "{{ logging_component }}"
+spec:
+ replicas: {{ kibana_replicas | default(1) }}
+ selector:
+ provider: openshift
+ component: "{{ component }}"
+ logging-infra: "{{ logging_component }}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{ deploy_name }}"
+ labels:
+ logging-infra: "{{ logging_component }}"
+ provider: openshift
+ component: "{{ component }}"
+ spec:
+ serviceAccountName: aggregated-logging-kibana
+{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in kibana_node_selector.iteritems() %}
+ {{ key }}: "{{ value }}"
+{% endfor %}
+{% endif %}
+ containers:
+ -
+ name: "kibana"
+ image: {{ image }}
+ imagePullPolicy: Always
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
+ resources:
+ limits:
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+ cpu: "{{ kibana_cpu_limit }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+ memory: "{{ kibana_memory_limit }}"
+{% endif %}
+{% endif %}
+ env:
+ - name: "ES_HOST"
+ value: "{{ es_host }}"
+ - name: "ES_PORT"
+ value: "{{ es_port }}"
+ -
+ name: "KIBANA_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana
+ resource: limits.memory
+ volumeMounts:
+ - name: kibana
+ mountPath: /etc/kibana/keys
+ readOnly: true
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/kibana/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
+ -
+ name: "kibana-proxy"
+ image: {{ proxy_image }}
+ imagePullPolicy: Always
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
+ resources:
+ limits:
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+ cpu: "{{ kibana_proxy_cpu_limit }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+ memory: "{{ kibana_proxy_memory_limit }}"
+{% endif %}
+{% endif %}
+ ports:
+ -
+ name: "oaproxy"
+ containerPort: 3000
+ env:
+ -
+ name: "OAP_BACKEND_URL"
+ value: "http://localhost:5601"
+ -
+ name: "OAP_AUTH_MODE"
+ value: "oauth2"
+ -
+ name: "OAP_TRANSFORM"
+ value: "user_header,token_header"
+ -
+ name: "OAP_OAUTH_ID"
+ value: kibana-proxy
+ -
+ name: "OAP_MASTER_URL"
+ value: {{ openshift_logging_kibana_master_url }}
+ -
+ name: "OAP_PUBLIC_MASTER_URL"
+ value: {{ openshift_logging_kibana_master_public_url }}
+ -
+ name: "OAP_LOGOUT_REDIRECT"
+ value: {{ openshift_logging_kibana_master_public_url }}/console/logout
+ -
+ name: "OAP_MASTER_CA_FILE"
+ value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ -
+ name: "OAP_DEBUG"
+ value: "{{ openshift_logging_kibana_proxy_debug }}"
+ -
+ name: "OAP_OAUTH_SECRET_FILE"
+ value: "/secret/oauth-secret"
+ -
+ name: "OAP_SERVER_CERT_FILE"
+ value: "/secret/server-cert"
+ -
+ name: "OAP_SERVER_KEY_FILE"
+ value: "/secret/server-key"
+ -
+ name: "OAP_SERVER_TLS_FILE"
+ value: "/secret/server-tls.json"
+ -
+ name: "OAP_SESSION_SECRET_FILE"
+ value: "/secret/session-secret"
+ -
+ name: "OCP_AUTH_PROXY_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana-proxy
+ resource: limits.memory
+ volumeMounts:
+ - name: kibana-proxy
+ mountPath: /secret
+ readOnly: true
+ volumes:
+ - name: kibana
+ secret:
+ secretName: logging-kibana
+ - name: kibana-proxy
+ secret:
+ secretName: logging-kibana-proxy
diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2
index 41d3123cb..c80ff3d30 100644
--- a/roles/openshift_logging/templates/oauth-client.j2
+++ b/roles/openshift_logging_kibana/templates/oauth-client.j2
@@ -4,10 +4,11 @@ metadata:
name: kibana-proxy
labels:
logging-infra: support
-secret: {{secret}}
+secret: {{ secret }}
redirectURIs:
-- https://{{openshift_logging_kibana_hostname}}
-- https://{{openshift_logging_kibana_ops_hostname}}
+{% for host in kibana_hostnames %}
+- {{ host }}
+{% endfor %}
scopeRestrictions:
- literals:
- user:info
diff --git a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
new file mode 100644
index 000000000..cf8a9e65f
--- /dev/null
+++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
@@ -0,0 +1,36 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ host: {{ route_host }}
+ tls:
+{% if tls_key is defined and tls_key | length > 0 %}
+ key: |
+{{ tls_key|indent(6, true) }}
+{% if tls_cert is defined and tls_cert | length > 0 %}
+ certificate: |
+{{ tls_cert|indent(6, true) }}
+{% endif %}
+{% endif %}
+ caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ termination: reencrypt
+{% if edge_term_policy is defined and edge_term_policy | length > 0 %}
+ insecureEdgeTerminationPolicy: {{ edge_term_policy }}
+{% endif %}
+ to:
+ kind: Service
+ name: {{ service_name }}
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
new file mode 100644
index 000000000..87b281c4b
--- /dev/null
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_kibana_version: "3_5"
+__allowed_kibana_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
new file mode 100644
index 000000000..797a27c1b
--- /dev/null
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -0,0 +1,49 @@
+---
+### General logging settings
+openshift_logging_mux_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_mux_namespace: logging
+
+### Common settings
+openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
+openshift_logging_mux_cpu_limit: 500m
+openshift_logging_mux_memory_limit: 2Gi
+openshift_logging_mux_buffer_queue_limit: 1024
+openshift_logging_mux_buffer_size_limit: 1m
+
+openshift_logging_mux_replicas: 1
+
+# Destination for the application based logs
+openshift_logging_mux_app_host: "logging-es"
+openshift_logging_mux_app_port: 9200
+# Destination for the operations based logs
+openshift_logging_mux_ops_host: "{{ openshift_logging_mux_app_host }}"
+openshift_logging_mux_ops_port: "{{ openshift_logging_mux_app_port }}"
+
+### Used by "hosted" and "secure-aggregator" deployments
+openshift_logging_mux_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+
+openshift_logging_mux_allow_external: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+# the namespace to use for undefined projects should come first, followed by any
+# additional namespaces to create by default - users will typically not need to set this
+openshift_logging_mux_default_namespaces: ["mux-undefined"]
+# extra namespaces to create for mux clients - users will need to set this
+openshift_logging_mux_namespaces: []
+
+openshift_logging_mux_app_client_cert: /etc/fluent/keys/cert
+openshift_logging_mux_app_client_key: /etc/fluent/keys/key
+openshift_logging_mux_app_ca: /etc/fluent/keys/ca
+openshift_logging_mux_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_mux_ops_client_key: /etc/fluent/keys/key
+openshift_logging_mux_ops_ca: /etc/fluent/keys/ca
+
+# The following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents can prevent your cluster from operating correctly
+#mux_config_contents:
+#mux_throttle_contents:
+#mux_secureforward_contents:
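Mux is an optional, centrally deployed Fluentd that other Fluentd instances (or external clients) forward records to. openshift_logging_mux_allow_external controls whether the service created below is published with an external IP, and the namespace lists are pre-created so records for unknown projects still have somewhere to land. A hedged inventory sketch; the hostname and namespace values are placeholders:

    openshift_logging_mux_allow_external: True
    openshift_logging_mux_hostname: mux.apps.example.com
    openshift_logging_mux_namespaces: ['external-app-logs']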
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf
index aeaa705ee..aeaa705ee 100644
--- a/roles/openshift_logging/files/fluent.conf
+++ b/roles/openshift_logging_mux/files/fluent.conf
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if ipaddress is used for host
+# hostlabel server.fqdn.example.com
+# </server>
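The file ships fully commented out; it is only a sample that operators replace through the secureforward contents variables handled in tasks/main.yaml. A sketch of an uncommented client configuration following the sample's own structure; the host and shared key are placeholders:

    @type secure_forward
    self_hostname ${HOSTNAME}
    shared_key example-shared-key
    secure yes
    ca_cert_path /etc/fluent/keys/your_ca_cert
    <server>
      host server.fqdn.example.com
      port 24284
    </server>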
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
new file mode 100644
index 000000000..f40beb79d
--- /dev/null
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Mux Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_mux/tasks/determine_version.yaml b/roles/openshift_logging_mux/tasks/determine_version.yaml
new file mode 100644
index 000000000..229bcf3d5
--- /dev/null
+++ b/roles/openshift_logging_mux/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install; provide one via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ mux_version: "{{ __latest_mux_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: mux_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for mux
+ when: mux_version not in __allowed_mux_versions
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
new file mode 100644
index 000000000..02815806a
--- /dev/null
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -0,0 +1,201 @@
+---
+- fail:
+ msg: Application logs destination is required
+ when: not openshift_logging_mux_app_host or openshift_logging_mux_app_host == ''
+
+- fail:
+ msg: Operations logs destination is required
+ when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Mux service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Mux service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# set service account scc
+- name: Set privileged permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# set hostmount-anyuid permissions
+- name: Set hostmount-anyuid permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# create Mux configmap
+- copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+- copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{fluentd_mux_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{fluentd_mux_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+- name: Set Mux configmap
+ oc_configmap:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ from_file:
+ fluent.conf: "{{ tempdir }}/fluent-mux.conf"
+ secure-forward.conf: "{{ tempdir }}/secure-forward-mux.conf"
+
+# create Mux secret
+- name: Set logging-mux secret
+ oc_secret:
+ state: present
+ name: logging-mux
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.mux.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.mux.crt"
+ - name: shared_key
+ path: "{{ generated_certs_dir }}/mux_shared_key"
+
+# services
+- name: Set logging-mux service for external communication
+ oc_service:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ selector:
+ component: mux
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
+ external_ips:
+ - "{{ ansible_eth0.ipv4.address }}"
+ when: openshift_logging_mux_allow_external | bool
+
+- name: Set logging-mux service for internal communication
+ oc_service:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ selector:
+ component: mux
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
+ when: not openshift_logging_mux_allow_external | bool
+
+# create Mux DC
+- name: Generating mux deploymentconfig
+ template:
+ src: mux.j2
+ dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml"
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{ component }}"
+ image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}"
+ es_host: "{{ openshift_logging_mux_app_host }}"
+ es_port: "{{ openshift_logging_mux_app_port }}"
+ ops_host: "{{ openshift_logging_mux_ops_host }}"
+ ops_port: "{{ openshift_logging_mux_ops_port }}"
+ mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
+ mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
+ mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
+ check_mode: no
+ changed_when: no
+
+- name: Set logging-mux DC
+ oc_obj:
+ state: present
+ name: logging-mux
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-mux-dc.yaml"
+ delete_after: true
+
+- name: Add mux namespaces
+ oc_project:
+ state: present
+ name: "{{ item }}"
+ node_selector: ""
+ with_items: "{{ openshift_logging_mux_namespaces | union(openshift_logging_mux_default_namespaces) }}"
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
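Two details in the tasks above are worth noting: the external service takes its external IP from ansible_eth0.ipv4.address, which assumes the primary interface on the target host is eth0, and the final task ensures one project per entry in the union of the default and user-supplied namespace lists, each with an empty node selector. A sketch of the outcome for a hypothetical override:

    # openshift_logging_mux_namespaces: ['payments', 'web']
    # projects ensured by the role: mux-undefined, payments, web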
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
new file mode 100644
index 000000000..2b3b64bb8
--- /dev/null
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -0,0 +1,133 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{mux_replicas|default(1)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-mux
+{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in mux_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: "mux"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if mux_cpu_limit is not none %}
+ cpu: "{{mux_cpu_limit}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
+ memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ - containerPort: "{{ openshift_logging_mux_port }}"
+ name: mux-forward
+ volumeMounts:
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{openshift_logging_mux_master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_mux_app_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_mux_app_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_mux_app_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_mux_app_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_mux_app_ca}}"
+ - name: "OPS_HOST"
+ value: "{{openshift_logging_mux_ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{openshift_logging_mux_ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_mux_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_mux_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_mux_ops_ca}}"
+ - name: "JOURNAL_SOURCE"
+ value: "{{openshift_logging_mux_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_mux_journal_read_from_head|lower}}"
+ - name: FORWARD_LISTEN_HOST
+ value: "{{ openshift_logging_mux_hostname }}"
+ - name: FORWARD_LISTEN_PORT
+ value: "{{ openshift_logging_mux_port }}"
+ - name: USE_MUX
+ value: "true"
+ - name: MUX_ALLOW_EXTERNAL
+ value: "{{ openshift_logging_mux_allow_external | default('false') | lower }}"
+ - name: "BUFFER_QUEUE_LIMIT"
+ value: "{{ openshift_logging_mux_buffer_queue_limit }}"
+ - name: "BUFFER_SIZE_LIMIT"
+ value: "{{ openshift_logging_mux_buffer_size_limit }}"
+ - name: "MUX_CPU_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "mux"
+ resource: limits.cpu
+ - name: "MUX_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "mux"
+ resource: limits.memory
+ volumes:
+ - name: config
+ configMap:
+ name: logging-mux
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
new file mode 100644
index 000000000..4234b74e2
--- /dev/null
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_mux_version: "3_5"
+__allowed_mux_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index f202486a5..cfc4e2722 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -3,24 +3,13 @@
msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
when: not openshift.common.version_gte_3_1_or_1_1 | bool
-- name: Copy Configuration to temporary conf
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
- changed_when: false
-
- name: Add Management Infrastructure project
- command: >
- {{ openshift.common.client_binary }} adm new-project
- management-infra
- --description="Management Infrastructure"
- --config={{manage_iq_tmp_conf}}
- register: osmiq_create_mi_project
- failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
- changed_when: osmiq_create_mi_project.rc == 0
+ oc_project:
+ name: management-infra
+ description: Management Infrastructure
- name: Create Admin and Image Inspector Service Account
oc_serviceaccount:
- kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: "{{ item }}"
namespace: management-infra
state: present
@@ -28,51 +17,42 @@
- management-admin
- inspector-admin
-- name: Create Cluster Role
- shell: >
- echo {{ manageiq_cluster_role | to_json | quote }} |
- {{ openshift.common.client_binary }} create
- --config={{manage_iq_tmp_conf}}
- -f -
- register: osmiq_create_cluster_role
- failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
- changed_when: osmiq_create_cluster_role.rc == 0
+- name: Create manageiq cluster role
+ oc_clusterrole:
+ name: management-infra-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods/proxy
+ verbs:
+ - "*"
- name: Create Hawkular Metrics Admin Cluster Role
- shell: >
- echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{manage_iq_tmp_conf}}
- create -f -
- register: oshawkular_create_cluster_role
- failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
- changed_when: oshawkular_create_cluster_role.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to verify the if the role even exists before
- # we run this task.
+ oc_clusterrole:
+ name: hawkular-metrics-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - hawkular-alerts
+ - hawkular-metrics
+ verbs:
+ - "*"
- name: Configure role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
- with_items: "{{manage_iq_tasks}}"
- register: osmiq_perm_task
- failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
- changed_when: osmiq_perm_task.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to compare the current role/user permissions
- # with their expected state. I think we may have a module for this?
-
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
+ with_items: "{{ manage_iq_tasks }}"
- name: Configure 3_2 role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
with_items: "{{manage_iq_openshift_3_2_tasks}}"
- register: osmiq_perm_3_2_task
- failed_when: osmiq_perm_3_2_task.rc != 0
- changed_when: osmiq_perm_3_2_task.rc == 0
when: openshift.common.version_gte_3_2_or_1_2 | bool
-
-- name: Clean temporary configuration file
- file: path={{manage_iq_tmp_conf}} state=absent
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 9936bb126..15d667628 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,41 +1,31 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
-
-manageiq_metrics_admin_clusterrole:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: hawkular-metrics-admin
- rules:
- - apiGroups:
- - ""
- resources:
- - hawkular-metrics
- - hawkular-alerts
- verbs:
- - '*'
-
-manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
-
manage_iq_tasks:
-- policy add-role-to-user -n management-infra admin -z management-admin
-- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
-- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
-- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- resource_kind: role
+ resource_name: admin
+ user: management-admin
+- resource_kind: role
+ resource_name: management-infra-admin
+ user: management-admin
+- resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-puller
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: cluster-role
+ resource_name: self-provisioner
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: hawkular-metrics-admin
+ user: system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
-- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-auditor
+ user: system:serviceaccount:management-infra:management-admin
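Each entry in manage_iq_tasks now maps one-to-one onto an oc_adm_policy_user call in tasks/main.yaml, replacing the shell invocations removed above. For comparison, the first entry corresponds to the removed CLI form:

    # - resource_kind: role, resource_name: admin, user: management-admin
    #   previously: oc adm policy add-role-to-user -n management-infra admin -z management-admin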
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index c3300a7ef..fbf69c270 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -15,17 +15,19 @@ Role Variables
From this role:
-| Name | Default value | |
-|-------------------------------------|-----------------------|--------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
-| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
-| oreg_url | UNDEF | Default docker registry to use |
-| openshift_master_api_port | UNDEF | |
-| openshift_master_console_port | UNDEF | |
-| openshift_master_api_url | UNDEF | |
-| openshift_master_console_url | UNDEF | |
-| openshift_master_public_api_url | UNDEF | |
-| openshift_master_public_console_url | UNDEF | |
+| Name | Default value | |
+|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
+| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
+| oreg_url | UNDEF | Default docker registry to use |
+| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
+| openshift_master_api_port | UNDEF | |
+| openshift_master_console_port | UNDEF | |
+| openshift_master_api_url | UNDEF | |
+| openshift_master_console_url | UNDEF | |
+| openshift_master_public_api_url | UNDEF | |
+| openshift_master_public_console_url | UNDEF | |
+| openshift_master_saconfig_limit_secret_references | false | |
From openshift_common:
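As a quick illustration of the two variables this change adds to the table above, a minimal group_vars sketch might look like the following; the registry URL and the boolean value are placeholders, not values taken from this diff.
```
# group_vars/masters.yml -- illustrative values only
oreg_url_master: registry.example.com/openshift3/ose-${component}:${version}
openshift_master_saconfig_limit_secret_references: true
```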
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 14a1daf6c..2d3ce5bcd 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,4 +1,4 @@
---
openshift_node_ips: []
-# TODO: update setting these values based on the facts
-#openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"
+r_openshift_master_clean_install: false
+r_openshift_master_etcd3_storage: false
diff --git a/roles/openshift_master/files/atomic-openshift-master.service b/roles/openshift_master/files/atomic-openshift-master.service
new file mode 100644
index 000000000..02af4dd16
--- /dev/null
+++ b/roles/openshift_master/files/atomic-openshift-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Atomic OpenShift Master
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=atomic-openshift-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=atomic-openshift-node.service
diff --git a/roles/openshift_master/files/origin-master.service b/roles/openshift_master/files/origin-master.service
new file mode 100644
index 000000000..cf79dda02
--- /dev/null
+++ b/roles/openshift_master/files/origin-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Origin Master Service
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=origin-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=origin-node.service
diff --git a/roles/openshift_master/tasks/files b/roles/openshift_master/tasks/files
new file mode 120000
index 000000000..feb122881
--- /dev/null
+++ b/roles/openshift_master/tasks/files
@@ -0,0 +1 @@
+../files \ No newline at end of file
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 98e0da1a2..0c4ee319c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -128,6 +128,24 @@
when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
with_items: "{{ openshift.master.identity_providers }}"
+# This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
+# The template file will stomp any other settings made.
+- block:
+ - name: check whether our docker-registry setting exists in the env file
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ ignore_errors: true
+ changed_when: false
+ register: already_set
+
+ - set_fact:
+ openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+
+- name: Set fact of all etcd host IPs
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+
- name: Install the systemd units
include: systemd_units.yml
@@ -177,6 +195,7 @@
# https://github.com/openshift/origin/issues/6447
- name: Start and enable master
systemd:
+ daemon_reload: yes
name: "{{ openshift.common.service_type }}-master"
enabled: yes
state: started
@@ -187,6 +206,10 @@
delay: 60
notify: Verify API Server
+- name: Dump logs from master service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master
+ when: start_result | failed
+
- name: Stop and disable non-HA master when running HA
systemd:
name: "{{ openshift.common.service_type }}-master"
@@ -194,7 +217,7 @@
state: stopped
when: openshift_master_ha | bool
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
@@ -220,6 +243,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-api if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ when: start_result | failed
+
- set_fact:
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
@@ -239,6 +266,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-api if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ when: start_result | failed
+
- set_fact:
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
@@ -275,6 +306,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-controllers if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ when: start_result | failed
+
- name: Wait for master controller service to start on first master
pause:
seconds: 15
@@ -291,6 +326,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-controllers if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ when: start_result | failed
+
- set_fact:
master_controllers_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 506c8b129..dfc255b3d 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -32,6 +32,15 @@
- not openshift.common.is_master_system_container | bool
register: create_master_unit_file
+- name: Install Master service file
+ copy:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
+ src: "{{ openshift.common.service_type }}-master.service"
+ register: create_master_unit_file
+ when:
+ - not openshift.common.is_containerized | bool
+ - (openshift.master.ha is not defined or not openshift.master.ha) | bool
+
- command: systemctl daemon-reload
when: create_master_unit_file | changed
@@ -90,6 +99,7 @@
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
+ no_log: True
- name: Preserve Master Controllers Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 6e2439fd9..850fae0e4 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index eef0f414e..e8f7c47b0 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -4,15 +4,15 @@ Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
Before={{ openshift.common.service_type }}-node.service
-After=docker.service
-PartOf=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
@@ -23,5 +23,5 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 088e8db43..69db62f16 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -3,15 +3,15 @@ Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
Wants={{ openshift.common.service_type }}-master-api.service
After={{ openshift.common.service_type }}-master-api.service
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
@@ -22,4 +22,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 938ac2a12..af3ebc6d2 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -44,10 +44,10 @@ assetConfig:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-{% if openshift_master_ha | bool %}
{% if openshift.master.audit_config | default(none) is not none and openshift.common.version_gte_3_2_or_1_2 | bool %}
auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
{% endif %}
+{% if openshift_master_ha | bool %}
controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
{% endif %}
{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
@@ -139,6 +139,12 @@ kubernetesMasterConfig:
- v1
{% endif %}
apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}
+ storage-backend:
+ - etcd3
+ storage-media-type:
+ - application/vnd.kubernetes.protobuf
+{% endif %}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
@@ -229,7 +235,7 @@ projectConfig:
routingConfig:
subdomain: "{{ openshift_master_default_subdomain | default("") }}"
serviceAccountConfig:
- limitSecretReferences: false
+ limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
managedNames:
- default
- builder
@@ -274,5 +280,12 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
+{% if openshift_template_service_broker_namespaces is defined %}
+templateServiceBrokerConfig:
+ templateNamespaces:
+{% for namespace in openshift_template_service_broker_namespaces %}
+ - {{ namespace }}
+{% endfor %}
+{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
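The two template additions above (the templateServiceBrokerConfig stanza and the etcd3 storage arguments) are gated on variables that appear elsewhere in this diff; a minimal sketch of opting in, assuming these are overridden from group_vars and using an illustrative namespace value:
```
# illustrative group_vars entries; names come from this diff, values are assumptions
openshift_template_service_broker_namespaces:
  - openshift
r_openshift_master_etcd3_storage: true
```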
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index be7644710..31c1dfc33 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -1,18 +1,18 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
After=etcd_container.service
Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index c484d23cc..c05a27559 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index e0adbbf52..a153fb33d 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index d4c9a96ca..d9ffb1b6f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -64,24 +64,24 @@
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
--overwrite=false
+ when: item != openshift_ca_host
with_items: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
- | difference([openshift_ca_host])}}"
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the master client config
+- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ --master={{ hostvars[item].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[item].openshift.master.loopback_api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
@@ -94,8 +94,8 @@
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
- | difference([openshift_ca_host])}}"
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ when: item != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
run_once: true
@@ -124,7 +124,6 @@
register: g_master_certs_mktemp
changed_when: False
when: master_certs_missing | bool
- delegate_to: localhost
become: no
- name: Create a tarball of the master certs
@@ -158,10 +157,10 @@
dest: "{{ openshift_master_config_dir }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
-- file: name={{ g_master_certs_mktemp.stdout }} state=absent
+- name: Delete local temp directory
+ local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent
changed_when: False
when: master_certs_missing | bool
- delegate_to: localhost
become: no
- name: Lookup default group for ansible_ssh_user
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index f1cbbeb2d..a80313505 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,2 +1,24 @@
---
openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+openshift_master_admission_plugin_config:
+ openshift.io/ImagePolicy:
+ configuration:
+ kind: ImagePolicyConfig
+ apiVersion: v1
+ # To require that all images running on the platform be imported first, you may uncomment the
+ # following rule. Any image that refers to a registry outside of OpenShift will be rejected
+ # unless it points directly to an image digest (myregistry.com/myrepo/image@sha256:ea83bcf...) and that
+ # digest has been imported via the import-image flow.
+ #resolveImages: Required
+ executionRules:
+ - name: execution-denied
+ # Reject all images that have the annotation images.openshift.io/deny-execution set to true.
+ # This annotation may be set by infrastructure that wishes to flag particular images as dangerous.
+ onResources:
+ - resource: pods
+ - resource: builds
+ reject: true
+ matchImageAnnotations:
+ - key: images.openshift.io/deny-execution
+ value: "true"
+ skipOnResolutionFailure: true
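The commented-out resolveImages rule above can be activated by overriding openshift_master_admission_plugin_config from the inventory; since an inventory variable replaces the role default wholesale rather than merging with it, the execution rule has to be repeated. A minimal sketch mirroring the default shown here:
```
openshift_master_admission_plugin_config:
  openshift.io/ImagePolicy:
    configuration:
      kind: ImagePolicyConfig
      apiVersion: v1
      resolveImages: Required
      executionRules:
      - name: execution-denied
        onResources:
        - resource: pods
        - resource: builds
        reject: true
        matchImageAnnotations:
        - key: images.openshift.io/deny-execution
          value: "true"
        skipOnResolutionFailure: true
```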
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 01806c97f..e767772ce 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-master
'''
@@ -14,9 +13,12 @@ from distutils.version import LooseVersion # pylint: disable=no-name-in-module,
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-# pylint import-error disabled because pylint cannot find the package
-# when installed in a virtualenv
-from ansible.compat.six import string_types # pylint: disable=no-name-in-module,import-error
+
+# ansible.compat.six goes away with Ansible 2.4
+try:
+ from ansible.compat.six import string_types, u
+except ImportError:
+ from ansible.module_utils.six import string_types, u
import yaml
@@ -466,7 +468,8 @@ class GitHubIdentityProvider(IdentityProviderOauthBase):
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
- self._optional += [['organizations']]
+ self._optional += [['organizations'],
+ ['teams']]
class FilterModule(object):
@@ -490,10 +493,11 @@ class FilterModule(object):
idp_list.append(idp_inst)
IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
- return yaml.dump([idp.to_dict() for idp in idp_list],
- allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper)
+ return u(yaml.dump([idp.to_dict() for idp in idp_list],
+ allow_unicode=True,
+ default_flow_style=False,
+ width=float("inf"),
+ Dumper=AnsibleDumper))
@staticmethod
def validate_pcs_cluster(data, masters=None):
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index 7f7bc4316..b50d6d9db 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,7 +40,7 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -49,7 +49,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
if deployment_type == 'origin':
- # convert short_version to enterpise short_version
+ # convert short_version to enterprise short_version
short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 66e6ecea3..a66cb3c88 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -41,7 +41,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 6f8f09b22..ef8dcd5fd 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -74,7 +74,7 @@
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
- registry_url: "{{ oreg_url | default(None) }}"
+ registry_url: "{{ oreg_url_master | default(oreg_url) | default(None) }}"
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
@@ -92,7 +92,7 @@
master_count: "{{ openshift_master_count | default(None) }}"
controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
- admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
+ admission_plugin_config: "{{openshift_master_admission_plugin_config }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
@@ -128,10 +128,10 @@
- name: Test if scheduler config is readable
fail:
msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}"
- when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}"
+ when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1'
- name: Set current scheduler predicates and priorities
set_fact:
openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}"
openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}"
- when: "{{ scheduler_config_stat.stat.exists }}"
+ when: scheduler_config_stat.stat.exists
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 1fab84c71..4a28fb8f8 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -55,6 +55,8 @@ DEFAULT_PREDICATES_1_5 = [
{'name': 'CheckNodeDiskPressure'},
]
+DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -75,9 +77,8 @@ TEST_VARS = [
('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
('1.5', 'origin', DEFAULT_PREDICATES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
- ('1.6', 'origin', DEFAULT_PREDICATES_1_5),
- ('3.6', 'origin', DEFAULT_PREDICATES_1_5),
- ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
+ ('3.6', 'origin', DEFAULT_PREDICATES_3_6),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
]
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index 1098f9391..97ef2387e 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -42,6 +42,8 @@ DEFAULT_PRIORITIES_1_5 = [
{'name': 'TaintTolerationPriority', 'weight': 1}
]
+DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5
+
ZONE_PRIORITY = {
'name': 'Zone',
'argument': {
@@ -63,9 +65,8 @@ TEST_VARS = [
('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
- ('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
- ('3.6', 'origin', DEFAULT_PRIORITIES_1_5),
- ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
+ ('3.6', 'origin', DEFAULT_PRIORITIES_3_6),
+ ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6),
]
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index f4c61a75e..1f10de4a2 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -68,6 +68,9 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_resolution`: How often metrics should be gathered.
+- `openshift_metrics_install_hawkular_agent`: Install the Hawkular OpenShift Agent (HOSA). HOSA can be used
+ to collect custom metrics from your pods. This component is currently in tech-preview and is not installed by default.
+
## Additional variables to control resource limits
Each metrics component (hawkular, cassandra, heapster) can specify CPU and memory limits and requests by setting
the corresponding role variable:
@@ -76,7 +79,7 @@ openshift_metrics_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE>
```
e.g.
```
-openshift_metrics_cassandra_limits_memory: 1G
+openshift_metrics_cassandra_limits_memory: 1Gi
openshift_metrics_hawkular_requests_cpu: 100
```
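Combining the HOSA toggle introduced above with the per-component limit/request variables, an inventory sketch might look like this; the variable names appear in defaults/main.yaml below, while the specific values are illustrative only.
```
openshift_metrics_install_hawkular_agent: true
openshift_metrics_cassandra_limits_memory: 2Gi
openshift_metrics_hawkular_agent_requests_cpu: 200m
```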
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 1d3db8a1a..c34936930 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,6 +16,7 @@ openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -30,6 +31,14 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
+openshift_metrics_install_hawkular_agent: False
+openshift_metrics_hawkular_agent_limits_memory: null
+openshift_metrics_hawkular_agent_limits_cpu: null
+openshift_metrics_hawkular_agent_requests_memory: null
+openshift_metrics_hawkular_agent_requests_cpu: null
+openshift_metrics_hawkular_agent_nodeselector: ""
+openshift_metrics_hawkular_agent_namespace: "default"
+
openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
openshift_metrics_duration: 7
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
deleted file mode 100755
index f977b6dd6..000000000
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-set -ex
-
-function import_certs() {
- dir=$CERT_DIR
- hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 --decode)
- hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 --decode)
- hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
-
- if [ ! -f $dir/hawkular-metrics.keystore ]; then
- echo "Creating the Hawkular Metrics keystore from the PEM file"
- keytool -importkeystore -v \
- -srckeystore $dir/hawkular-metrics.pkcs12 \
- -destkeystore $dir/hawkular-metrics.keystore \
- -srcstoretype PKCS12 \
- -deststoretype JKS \
- -srcstorepass $hawkular_metrics_keystore_password \
- -deststorepass $hawkular_metrics_keystore_password
- fi
-
- cert_alias_names=(ca metricca)
-
- for cert_alias in ${cert_alias_names[*]}; do
- if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then
- echo "Importing the CA Certificate with alias $cert_alias into the Hawkular Metrics Truststore"
- keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
- -file ${dir}/ca.crt \
- -keystore $dir/hawkular-metrics.truststore \
- -trustcacerts \
- -storepass $hawkular_metrics_truststore_password
- fi
- done
-}
-
-import_certs
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index 7af3f9467..3dc15d58b 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
---
- name: generate ca certificate chain
command: >
- {{ openshift.common.admin_binary }} ca create-signer-cert
+ {{ openshift.common.client_binary }} adm ca create-signer-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 01fc1ef64..31129a6ac 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -13,42 +13,29 @@
hostnames: hawkular-cassandra
changed_when: no
-- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd
- register: hawkular_truststore_password
-
-- stat: path="{{mktemp.stdout}}/{{item}}"
- register: pwd_file_stat
- with_items:
- - hawkular-metrics.pwd
- - hawkular-metrics.htpasswd
- changed_when: no
-
-- set_fact:
- pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}"
- with_items: "{{pwd_file_stat.results}}"
- changed_when: no
-
- name: generate password for hawkular metrics
- local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
+ become: false
+
+- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd"
+ register: hawkular_metrics_pwd
+ no_log: true
+ become: false
- name: generate htpasswd file for hawkular metrics
- local_action: >
- shell htpasswd -ci
- '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular
- < '{{ local_tmp.stdout }}/hawkular-metrics.pwd'
+ local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
+ become: false
- name: copy local generated passwords to target
copy:
- src: "{{local_tmp.stdout}}/{{item}}"
- dest: "{{mktemp.stdout}}/{{item}}"
+ src: "{{ local_tmp.stdout }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
-- include: import_jks_certs.yaml
-
- name: read files for the hawkular-metrics secret
shell: >
printf '%s: ' '{{ item }}'
@@ -56,13 +43,11 @@
register: hawkular_secrets
with_items:
- ca.crt
- - hawkular-metrics.crt
- - hawkular-metrics.keystore
- - hawkular-metrics-keystore.pwd
- - hawkular-metrics.truststore
- - hawkular-metrics-truststore.pwd
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
+ - hawkular-metrics.crt
+ - hawkular-metrics.key
+ - hawkular-metrics.pem
- hawkular-cassandra.crt
- hawkular-cassandra.key
- hawkular-cassandra.pem
@@ -73,41 +58,24 @@
{{ hawkular_secrets.results|map(attribute='stdout')|join('
')|from_yaml }}
-- name: generate hawkular-metrics-secrets secret template
+- name: generate hawkular-metrics-certs secret template
template:
src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_secrets.yaml"
+ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-certs.yaml"
vars:
- name: hawkular-metrics-secrets
+ name: hawkular-metrics-certs
labels:
- metrics-infra: hawkular-metrics
- data:
- hawkular-metrics.keystore: >
- {{ hawkular_secrets['hawkular-metrics.keystore'] }}
- hawkular-metrics.keystore.password: >
- {{ hawkular_secrets['hawkular-metrics-keystore.pwd'] }}
- hawkular-metrics.truststore: >
- {{ hawkular_secrets['hawkular-metrics.truststore'] }}
- hawkular-metrics.truststore.password: >
- {{ hawkular_secrets['hawkular-metrics-truststore.pwd'] }}
- hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
- hawkular-metrics.htpasswd.file: >
- {{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
- when: name not in metrics_secrets.stdout_lines
- changed_when: no
-
-- name: generate hawkular-metrics-certificate secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_certificate.yaml"
- vars:
- name: hawkular-metrics-certificate
- labels:
- metrics-infra: hawkular-metrics
+ metrics-infra: hawkular-metrics-certs
+ annotations:
+ service.alpha.openshift.io/originating-service-name: hawkular-metrics
data:
- hawkular-metrics.certificate: >
+ tls.crt: >
{{ hawkular_secrets['hawkular-metrics.crt'] }}
- hawkular-metrics-ca.certificate: >
+ tls.key: >
+ {{ hawkular_secrets['hawkular-metrics.key'] }}
+ tls.truststore.crt: >
+ {{ hawkular_secrets['hawkular-cassandra.crt'] }}
+ ca.crt: >
{{ hawkular_secrets['ca.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
@@ -122,6 +90,7 @@
metrics-infra: hawkular-metrics
data:
hawkular-metrics.username: "{{ 'hawkular'|b64encode }}"
+ hawkular-metrics.htpasswd: "{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}"
hawkular-metrics.password: >
{{ hawkular_secrets['hawkular-metrics.pwd'] }}
when: name not in metrics_secrets.stdout_lines
diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
deleted file mode 100644
index ced2df1d0..000000000
--- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: generate heapster key/cert
- command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --key='{{ mktemp.stdout }}/heapster.key'
- --cert='{{ mktemp.stdout }}/heapster.cert'
- --hostnames=heapster
- --signer-cert='{{ mktemp.stdout }}/ca.crt'
- --signer-key='{{ mktemp.stdout }}/ca.key'
- --signer-serial='{{ mktemp.stdout }}/ca.serial.txt'
-
-- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
- block:
- - name: read files for the heapster secret
- slurp: src={{ item }}
- register: heapster_secret
- with_items:
- - "{{ mktemp.stdout }}/heapster.cert"
- - "{{ mktemp.stdout }}/heapster.key"
- - "{{ client_ca }}"
- vars:
- custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt"
- default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}"
- - name: generate heapster secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
- force: no
- vars:
- name: heapster-secrets
- labels:
- metrics-infra: heapster
- data:
- heapster.cert: "{{ heapster_secret.results[0].content }}"
- heapster.key: "{{ heapster_secret.results[1].content }}"
- heapster.client-ca: "{{ heapster_secret.results[2].content }}"
- heapster.allowed-users: >
- {{ openshift_metrics_heapster_allowed_users|b64encode }}
diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
new file mode 100644
index 000000000..e81d90ae7
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: generate heapster secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
+ force: no
+ vars:
+ name: heapster-secrets
+ labels:
+ metrics-infra: heapster
+ data:
+ heapster.allowed-users: >
+ {{ openshift_metrics_heapster_allowed_users|b64encode }}
+ when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
diff --git a/roles/openshift_metrics/tasks/generate_rolebindings.yaml b/roles/openshift_metrics/tasks/generate_rolebindings.yaml
index e050c8eb2..1304ab8b5 100644
--- a/roles/openshift_metrics/tasks/generate_rolebindings.yaml
+++ b/roles/openshift_metrics/tasks/generate_rolebindings.yaml
@@ -13,3 +13,27 @@
- kind: ServiceAccount
name: hawkular
changed_when: no
+
+- name: generate hawkular-metrics cluster role binding for the hawkular service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: hawkular-namespace-watcher
+ labels:
+ metrics-infra: hawkular
+ roleRef:
+ kind: ClusterRole
+ name: hawkular-metrics
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular
+ namespace: "{{openshift_metrics_project}}"
+ changed_when: no
+
+- name: generate the hawkular cluster role
+ template:
+ src: hawkular_metrics_role.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
deleted file mode 100644
index e098145e9..000000000
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore"
- register: metrics_keystore
- check_mode: no
-
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.truststore"
- register: metrics_truststore
- check_mode: no
-
-- block:
- - slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
- register: metrics_keystore_password
-
- - fetch:
- dest: "{{local_tmp.stdout}}/"
- src: "{{ mktemp.stdout }}/{{item}}"
- flat: yes
- changed_when: False
- with_items:
- - hawkular-metrics.pkcs12
- - hawkular-metrics.crt
- - ca.crt
-
- - local_action: command {{role_path}}/files/import_jks_certs.sh
- environment:
- CERT_DIR: "{{local_tmp.stdout}}"
- METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
- METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
- changed_when: False
-
- - copy:
- dest: "{{mktemp.stdout}}/"
- src: "{{item}}"
- with_fileglob: "{{local_tmp.stdout}}/*.*store"
-
- when: not metrics_keystore.stat.exists or
- not metrics_truststore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index a467c1a51..7928a0346 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -35,6 +35,8 @@
metrics-infra: hawkular-cassandra
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -49,10 +51,9 @@
obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
labels:
metrics-infra: hawkular-cassandra
- annotations:
- volume.alpha.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index c490bcdd3..0eb852d91 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -20,9 +20,9 @@
- set_fact:
heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}"
with_items:
- - hawkular-metrics-certificate
+ - hawkular-metrics-certs
- hawkular-metrics-account
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Generating serviceaccount for heapster
template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml
@@ -41,6 +41,8 @@
- {port: 80, targetPort: http-endpoint}
selector:
name: "{{obj_name}}"
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: heapster-certs
labels:
metrics-infra: "{{obj_name}}"
name: "{{obj_name}}"
@@ -64,4 +66,4 @@
namespace: "{{ openshift_metrics_project }}"
changed_when: no
-- include: generate_heapster_certificates.yaml
+- include: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
new file mode 100644
index 000000000..7c9bc26d0
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -0,0 +1,44 @@
+---
+- name: Generate Hawkular Agent (HOSA) Cluster Role
+ template:
+ src: hawkular_openshift_agent_role.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-role.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Service Account
+ template:
+ src: hawkular_openshift_agent_sa.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-sa.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Daemon Set
+ template:
+ src: hawkular_openshift_agent_ds.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-ds.yaml"
+ vars:
+ node_selector: "{{openshift_metrics_hawkular_agent_nodeselector | default('') }}"
+ changed_when: no
+
+- name: Generate the Hawkular Agent (HOSA) Configmap
+ template:
+ src: hawkular_openshift_agent_cm.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-cm.yaml"
+ changed_when: no
+
+- name: Generate role binding for the hawkular-openshift-agent service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-openshift-agent-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: hawkular-openshift-agent-rb
+ labels:
+ metrics-infra: hawkular-agent
+ roleRef:
+ kind: ClusterRole
+ name: hawkular-openshift-agent
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular-openshift-agent
+ namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index ffe6f63a2..fdf4ae57f 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -10,17 +10,25 @@
- cassandra
loop_control:
loop_var: include_file
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Install Heapster Standalone
include: install_heapster.yaml
- when: "{{ openshift_metrics_heapster_standalone | bool }}"
+ when: openshift_metrics_heapster_standalone | bool
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+- name: Install Hawkular OpenShift Agent (HOSA)
+ include: install_hosa.yaml
+ when: openshift_metrics_install_hawkular_agent | default(false) | bool
+
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^(?!metrics-hawkular-openshift-agent).*.yaml"
+ use_regex: true
register: object_def_files
changed_when: no
-- slurp: src={{item.path}}
+- slurp:
+ src: "{{item.path}}"
register: object_defs
with_items: "{{object_def_files.files}}"
changed_when: no
@@ -34,6 +42,31 @@
file_content: "{{ item.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^metrics-hawkular-openshift-agent.*.yaml"
+ use_regex: true
+ register: hawkular_agent_object_def_files
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- slurp:
+ src: "{{item.path}}"
+ register: hawkular_agent_object_defs
+ with_items: "{{ hawkular_agent_object_def_files.files }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- name: Create Hawkular Agent objects
+ include: oc_apply.yaml
+ vars:
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ hawkular_agent_object_defs.results }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+
- include: update_master_config.yaml
- command: >
@@ -48,7 +81,7 @@
- name: Scaling down cluster to recognize changes
include: stop_metrics.yaml
- when: "{{ existing_metrics_rc.stdout_lines | length > 0 }}"
+ when: existing_metrics_rc.stdout_lines | length > 0
- name: Scaling up cluster
include: start_metrics.yaml
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index 5cefb273d..584e3be05 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -4,6 +4,7 @@
register: htpasswd_check
failed_when: no
changed_when: no
+ become: false
- fail: msg="'htpasswd' is unavailable. Please install httpd-tools on the control node"
when: htpasswd_check.rc == 1
@@ -13,6 +14,7 @@
register: keytool_check
failed_when: no
changed_when: no
+ become: false
- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
when: keytool_check.rc == 1
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c8d222c60..eaabdd20f 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,4 +1,13 @@
---
+- local_action: shell python -c 'import passlib' 2>/dev/null || echo not installed
+ register: passlib_result
+ become: false
+
+- name: Check that python-passlib is available on the control host
+ assert:
+ that:
+ - "'not installed' not in passlib_result.stdout"
+ msg: "python-passlib rpm must be installed on control host"
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
@@ -19,12 +28,13 @@
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
changed_when: False
- when: "{{ openshift_metrics_install_metrics | bool }}"
+ when: openshift_metrics_install_metrics | bool
- name: Create local temp directory on control node
local_action: command mktemp -d
register: local_tmp
changed_when: False
+ become: false
- name: Copy the admin client config(s)
command: >
@@ -35,8 +45,12 @@
- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: uninstall_hosa.yaml
+ when: not openshift_metrics_install_hawkular_agent | bool
+
- name: Delete temp directory
  local_action: file path="{{ local_tmp.stdout }}" state=absent
tags: metrics_cleanup
changed_when: False
check_mode: no
+ become: false
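The passlib pre-flight added above runs on the control host via local_action with become: false. A short sketch of an equivalent check using delegate_to and an explicit failed_when; this is an assumption about an alternative style, not what the role actually uses:

    - name: Check that python-passlib is available on the control host
      command: python -c 'import passlib'
      delegate_to: localhost
      become: false
      register: passlib_check
      changed_when: false
      failed_when: passlib_check.rc != 0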
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index dd67703b4..1e1af40e8 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -14,7 +14,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
- -n {{ openshift_metrics_project }}
+ -n {{namespace}}
register: generation_apply
failed_when: "'error' in generation_apply.stderr"
changed_when: no
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index 199968579..2d880f4d6 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
---
- name: generate {{ component }} keys
command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
+ {{ openshift.common.client_binary }} adm ca create-server-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/{{ component }}.key'
--cert='{{ mktemp.stdout }}/{{ component }}.crt'
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index b5a1c8f06..2037e8dc3 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -20,7 +20,7 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
- command: >
{{openshift.common.client_binary}}
@@ -42,7 +42,7 @@
with_items: "{{metrics_metrics_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_metrics_rc | length > 0 }}"
+ changed_when: metrics_metrics_rc | length > 0
- command: >
{{openshift.common.client_binary}}
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index f69bb0f11..9a2ce9267 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -41,7 +41,7 @@
with_items: "{{metrics_hawkular_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_hawkular_rc | length > 0 }}"
+ changed_when: metrics_hawkular_rc | length > 0
- command: >
{{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
@@ -63,4 +63,4 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
new file mode 100644
index 000000000..42ed02460
--- /dev/null
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -0,0 +1,15 @@
+---
+- name: remove Hawkular Agent (HOSA) components
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found --selector=metrics-infra=agent
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ register: delete_metrics
+ changed_when: delete_metrics.stdout != 'No resources found'
+
+- name: remove rolebindings
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found
+ clusterrolebinding/hawkular-openshift-agent-rb
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 8a6be6237..403b1252c 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -6,9 +6,9 @@
command: >
{{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found --selector=metrics-infra
- all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
register: delete_metrics
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
- name: remove rolebindings
command: >
@@ -16,4 +16,5 @@
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ clusterrolebinding/hawkular-metrics
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
index 20fc45fd4..be1e3c3a0 100644
--- a/roles/openshift_metrics/tasks/update_master_config.yaml
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.metricsPublicURL
yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 889317847..fc82f49b1 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -30,6 +30,7 @@ spec:
{% endif %}
containers:
- image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}"
+ imagePullPolicy: Always
name: hawkular-cassandra-{{ node }}
ports:
- name: cql-port
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 361378df3..9a9363075 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -25,6 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
name: hawkular-metrics
ports:
- name: http-endpoint
@@ -40,24 +41,20 @@ spec:
- "-Dhawkular.metrics.cassandra.nodes=hawkular-cassandra"
- "-Dhawkular.metrics.cassandra.use-ssl"
- "-Dhawkular.metrics.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.metrics.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization"
- "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}"
- "-Dhawkular.metrics.admin-tenant=_hawkular_admin"
- "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra"
- "-Dhawkular-alerts.cassandra-use-ssl"
- "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.alerts.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.alerts.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization"
- "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true"
- "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true"
- "-Dcom.datastax.driver.FORCE_NIO=true"
- "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
- "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
- - "--hmw.keystore=/secrets/hawkular-metrics.keystore"
- - "--hmw.truststore=/secrets/hawkular-metrics.truststore"
- - "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
- - "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -67,6 +64,8 @@ spec:
value: "{{ openshift_metrics_master_url }}"
- name: JGROUPS_PASSWORD
value: "{{ 17 | oo_random_word }}"
+ - name: TRUSTSTORE_AUTHORITIES
+ value: "/hawkular-metrics-certs/tls.truststore.crt"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
@@ -76,10 +75,10 @@ spec:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
volumeMounts:
- - name: hawkular-metrics-secrets
- mountPath: "/secrets"
- - name: hawkular-metrics-client-secrets
- mountPath: "/client-secrets"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
+ - name: hawkular-metrics-account
+ mountPath: "/hawkular-account"
{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
@@ -118,9 +117,9 @@ spec:
command:
- "/opt/hawkular/scripts/hawkular-metrics-liveness.py"
volumes:
- - name: hawkular-metrics-secrets
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-secrets
- - name: hawkular-metrics-client-secrets
+ secretName: hawkular-metrics-certs
+ - name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_role.j2 b/roles/openshift_metrics/templates/hawkular_metrics_role.j2
new file mode 100644
index 000000000..6c9dbf5d6
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_metrics_role.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: hawkular-metrics
+ labels:
+ metrics-infra: hawkular-metrics
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - list
+ - get
+ - watch
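The uninstall hunk above now also deletes clusterrolebinding/hawkular-metrics, so this ClusterRole is presumably bound to the Hawkular Metrics service account during install. A hedged sketch of what that binding step could look like, written in the same command style the role already uses; the service account name "hawkular" is an assumption:

    - name: Bind the hawkular-metrics cluster role (illustrative only)
      command: >
        {{ openshift.common.client_binary }} adm policy add-cluster-role-to-user
        hawkular-metrics system:serviceaccount:{{ openshift_metrics_project }}:hawkular
        --config={{ mktemp.stdout }}/admin.kubeconfig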
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
new file mode 100644
index 000000000..bf472c066
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
@@ -0,0 +1,54 @@
+id: hawkular-openshift-agent
+kind: ConfigMap
+apiVersion: v1
+name: Hawkular OpenShift Agent Configuration
+metadata:
+ name: hawkular-openshift-agent-configuration
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+data:
+ config.yaml: |
+ kubernetes:
+ tenant: ${POD:namespace_name}
+ hawkular_server:
+ url: https://hawkular-metrics.openshift-infra.svc.cluster.local
+ credentials:
+ username: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.username
+ password: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.password
+ ca_cert_file: secret:openshift-infra/hawkular-metrics-certs/ca.crt
+ emitter:
+ status_enabled: false
+ collector:
+ minimum_collection_interval: 10s
+ default_collection_interval: 30s
+ metric_id_prefix: pod/${POD:uid}/custom/
+ tags:
+ metric_name: ${METRIC:name}
+ description: ${METRIC:description}
+ units: ${METRIC:units}
+ namespace_id: ${POD:namespace_uid}
+ namespace_name: ${POD:namespace_name}
+ node_name: ${POD:node_name}
+ pod_id: ${POD:uid}
+ pod_ip: ${POD:ip}
+ pod_name: ${POD:name}
+ pod_namespace: ${POD:namespace_name}
+ hostname: ${POD:hostname}
+ host_ip: ${POD:host_ip}
+ labels: ${POD:labels}
+ type: pod
+ collector: hawkular_openshift_agent
+ custom_metric: true
+ hawkular-openshift-agent: |
+ endpoints:
+ - type: prometheus
+ protocol: "http"
+ port: 8080
+ path: /metrics
+ collection_interval: 30s
+ metrics:
+ - name: hawkular_openshift_agent_metric_data_points_collected_total
+ - name: hawkular_openshift_agent_monitored_endpoints
+ - name: hawkular_openshift_agent_monitored_pods
+ - name: hawkular_openshift_agent_monitored_metrics
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
new file mode 100644
index 000000000..d65eaf9ae
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -0,0 +1,91 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+spec:
+ selector:
+ matchLabels:
+ name: hawkular-openshift-agent
+ template:
+ metadata:
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ spec:
+ serviceAccount: hawkular-openshift-agent
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
+ name: hawkular-openshift-agent
+{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none)
+ or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none)
+ or (openshift_metrics_hawkular_agent_requests_cpu is defined and openshift_metrics_hawkular_agent_requests_cpu is not none)
+ or (openshift_metrics_hawkular_agent_requests_memory is defined and openshift_metrics_hawkular_agent_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_hawkular_agent_limits_cpu is not none
+ or openshift_metrics_hawkular_agent_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_hawkular_agent_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_limits_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_hawkular_agent_requests_cpu is not none
+ or openshift_metrics_hawkular_agent_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_hawkular_agent_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_requests_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /health
+ port: 8080
+ initialDelaySeconds: 30
+ periodSeconds: 30
+ command:
+ - "hawkular-openshift-agent"
+ - "-config"
+ - "/hawkular-openshift-agent-configuration/config.yaml"
+ - "-v"
+ - "3"
+ env:
+ - name: K8S_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: hawkular-openshift-agent-configuration
+ mountPath: "/hawkular-openshift-agent-configuration"
+ volumes:
+ - name: hawkular-openshift-agent-configuration
+ configMap:
+ name: hawkular-openshift-agent-configuration
+ - name: hawkular-openshift-agent
+ configMap:
+ name: hawkular-openshift-agent-configuration
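The second volume in the DaemonSet above, named literally hawkular-openshift-agent and backed by the agent's own ConfigMap, is the agent's pod-discovery convention: HOSA scrapes pods that declare a volume with that exact name and reads the endpoints from the referenced ConfigMap (the hawkular-openshift-agent key shown in the ConfigMap template above). A minimal sketch of a user pod opting in the same way; the pod, image, and ConfigMap names are assumptions:

    apiVersion: v1
    kind: Pod
    metadata:
      name: example-app                       # assumed
    spec:
      containers:
      - name: app
        image: example/app:latest             # assumed; exposes /metrics on 8080 to match the endpoint config
        ports:
        - containerPort: 8080
      volumes:
      - name: hawkular-openshift-agent        # the volume name the agent looks for
        configMap:
          name: example-app-metrics-config    # assumed; holds a hawkular-openshift-agent key with an endpoints list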
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
new file mode 100644
index 000000000..24b8cd801
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - namespaces
+ - nodes
+ - pods
+ - projects
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
new file mode 100644
index 000000000..ec604d73c
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index 7c837db4d..d8c7763ea 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -27,6 +27,7 @@ spec:
containers:
- name: heapster
image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
ports:
- containerPort: 8082
name: "http-endpoint"
@@ -34,24 +35,24 @@ spec:
- "heapster-wrapper.sh"
- "--wrapper.allowed_users_file=/secrets/heapster.allowed-users"
- "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250"
- - "--tls_cert=/secrets/heapster.cert"
- - "--tls_key=/secrets/heapster.key"
- - "--tls_client_ca=/secrets/heapster.client-ca"
+ - "--tls_cert=/heapster-certs/tls.crt"
+ - "--tls_key=/heapster-certs/tls.key"
+ - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- "--allowed_users=%allowed_users%"
- "--metric_resolution={{openshift_metrics_resolution}}"
{% if not openshift_metrics_heapster_standalone %}
- "--wrapper.username_file=/hawkular-account/hawkular-metrics.username"
- "--wrapper.password_file=/hawkular-account/hawkular-metrics.password"
- "--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status"
- - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
+ - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-metrics-certs/tls.crt&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
{% endif %}
env:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
-{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
+{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
or (openshift_metrics_heapster_limits_memory is defined and openshift_metrics_heapster_limits_memory is not none)
or (openshift_metrics_heapster_requests_cpu is defined and openshift_metrics_heapster_requests_cpu is not none)
- or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
+ or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_heapster_limits_cpu is not none
@@ -65,8 +66,8 @@ spec:
memory: "{{openshift_metrics_heapster_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_heapster_requests_cpu is not none
- or openshift_metrics_heapster_requests_memory is not none)
+{% if (openshift_metrics_heapster_requests_cpu is not none
+ or openshift_metrics_heapster_requests_memory is not none)
%}
requests:
{% if openshift_metrics_heapster_requests_cpu is not none %}
@@ -80,9 +81,11 @@ spec:
volumeMounts:
- name: heapster-secrets
mountPath: "/secrets"
+ - name: heapster-certs
+ mountPath: "/heapster-certs"
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
- mountPath: "/hawkular-cert"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
- name: hawkular-metrics-account
mountPath: "/hawkular-account"
readinessProbe:
@@ -94,10 +97,13 @@ spec:
- name: heapster-secrets
secret:
secretName: heapster-secrets
+ - name: heapster-certs
+ secret:
+ secretName: heapster-certs
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-certificate
+ secretName: hawkular-metrics-certs
- name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index c2e56ba21..b4e6a1503 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -18,6 +18,13 @@ metadata:
{% endfor %}
{% endif %}
spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
accessModes:
{% for mode in access_modes %}
- {{ mode }}
@@ -25,3 +32,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
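With the two additions above, the rendered spec section of pvc.j2 gains an optional matchLabels selector and an optional storageClassName. A sketch of the rendered output with illustrative values (the selector key/value and the class name are assumptions):

    spec:
      selector:
        matchLabels:
          storage-tier: fast                  # from pv_selector
      accessModes:
      - ReadWriteOnce                         # from access_modes
      resources:
        requests:
          storage: 10Gi                       # from size
      storageClassName: glusterfs-storage     # only emitted when storage_class_name is defined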
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
index 08ca87288..423ab54a3 100644
--- a/roles/openshift_metrics/templates/route.j2
+++ b/roles/openshift_metrics/templates/route.j2
@@ -17,7 +17,7 @@ spec:
tls:
termination: {{ tls.termination }}
{% if tls.ca_certificate is defined and tls.ca_certificate | length > 0 %}
- CACertificate: |
+ caCertificate: |
{{ tls.ca_certificate|indent(6, true) }}
{% endif %}
{% if tls.key is defined and tls.key | length > 0 %}
diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2
index 8df89127b..ce0bc2eec 100644
--- a/roles/openshift_metrics/templates/service.j2
+++ b/roles/openshift_metrics/templates/service.j2
@@ -2,6 +2,12 @@ apiVersion: "v1"
kind: "Service"
metadata:
name: "{{obj_name}}"
+{% if annotations is defined%}
+ annotations:
+{% for key, value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
{% if labels is defined%}
labels:
{% for key, value in labels.iteritems() %}
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f28c3ce48..b20957550 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
new file mode 100644
index 000000000..6ed6d404c
--- /dev/null
+++ b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use with openshift named certificates
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use with openshift named certificates'''
+
+ @staticmethod
+ def oo_named_certificates_list(named_certificates):
+ ''' Returns named certificates list with correct fields for the master
+ config file.'''
+ return [{'certFile': named_certificate['certfile'],
+ 'keyFile': named_certificate['keyfile'],
+ 'names': named_certificate['names']} for named_certificate in named_certificates]
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {"oo_named_certificates_list": self.oo_named_certificates_list}
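A hedged sketch of how this filter is meant to be consumed from a task; the openshift_master_named_certificates variable name and the set_fact target are assumptions about typical usage, while the certfile/keyfile/names keys come from the filter itself:

    - set_fact:
        parsed_named_certificates: "{{ openshift_master_named_certificates | oo_named_certificates_list }}"
      vars:
        openshift_master_named_certificates:
        - certfile: /path/to/custom.crt
          keyfile: /path/to/custom.key
          names:
          - public-master.example.com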
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index b69b60c1d..fb0b494da 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -15,10 +15,11 @@ Role Variables
--------------
From this role:
-| Name | Default value | |
-|------------------------------------------|-----------------------|--------------------------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
-| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|----------------------------|-----------------------|----------------------------------------------------------|
+| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
+| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node |
From openshift_common:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index fffbf2994..47073ee0f 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -6,10 +6,9 @@ os_firewall_allow:
port: 80/tcp
- service: https
port: 443/tcp
-- service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
-- service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- service: OpenShift OVS sdn
port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
+ when: openshift.common.use_openshift_sdn | default(true) | bool
+- service: Calico BGP Port
+ port: 179/tcp
+ when: openshift.common.use_calico | bool
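The new firewall rule above is gated on openshift.common.use_calico. A one-line sketch of the inventory toggle that would typically feed that fact; the variable name openshift_use_calico is an assumption about the usual openshift_facts mapping:

    # group_vars / inventory vars -- illustrative only
    openshift_use_calico: true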
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index cb51416d4..f2c45a4bd 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,14 +1,35 @@
---
- name: restart openvswitch
- systemd: name=openvswitch state=restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ systemd:
+ name: openvswitch
+ state: restarted
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool
+ register: l_openshift_node_stop_openvswitch_result
+ until: not l_openshift_node_stop_openvswitch_result | failed
+ retries: 3
+ delay: 30
notify:
- restart openvswitch pause
+
- name: restart openvswitch pause
pause: seconds=15
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
- systemd: name={{ openshift.common.service_type }}-node state=restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
+ register: l_openshift_node_restart_node_result
+ until: not l_openshift_node_restart_node_result | failed
+ retries: 3
+ delay: 30
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (node_service_status_changed | default(false) | bool)
+
+- name: reload sysctl.conf
+ command: /sbin/sysctl -p
+
+- name: reload systemd units
+ command: systemctl daemon-reload
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c97ff1b4b..e19d82ddc 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -26,15 +26,17 @@ dependencies:
port: 80/tcp
- service: https
port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- role: os_firewall
os_firewall_allow:
- service: OpenShift OVS sdn
port: 4789/udp
- when: openshift.common.use_openshift_sdn | bool
+ when: openshift.common.use_openshift_sdn | default(true) | bool
+- role: os_firewall
+ os_firewall_allow:
+ - service: Calico BGP Port
+ port: 179/tcp
+ when: openshift.common.use_calico | bool
+
- role: os_firewall
os_firewall_allow:
- service: Kubernetes service NodePort TCP
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 626248306..8b4931e7c 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -22,7 +22,7 @@
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url | default(none) }}"
+ registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
@@ -34,6 +34,38 @@
dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+# Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+# End Disable Swap Block
+
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- name: Install Node package
@@ -58,7 +90,9 @@
package:
name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
+ when:
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ - not openshift.common.is_containerized | bool
- name: Install conntrack-tools package
package:
@@ -72,16 +106,28 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
+#
+# Use lineinfile w/ a handler for this task until
+# https://github.com/ansible/ansible/pull/24277 is included in an
+# ansible release and we can use the sysctl module.
- name: Persist net.ipv4.ip_forward sysctl entry
- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
+ lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
+ notify:
+ - reload sysctl.conf
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
enabled: yes
state: started
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+ daemon_reload: yes
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.use_openshift_sdn | default(true) | bool
register: ovs_start_result
+ until: not ovs_start_result | failed
+ retries: 3
+ delay: 30
- set_fact:
ovs_service_status_changed: "{{ ovs_start_result | changed }}"
@@ -115,7 +161,7 @@
- regex: '^AWS_SECRET_ACCESS_KEY='
line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
no_log: True
- when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
+ when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
notify:
- restart node
@@ -168,20 +214,33 @@
- name: Start and enable node dep
systemd:
+ daemon_reload: yes
name: "{{ openshift.common.service_type }}-node-dep"
enabled: yes
state: started
when: openshift.common.is_containerized | bool
+
- name: Start and enable node
systemd:
name: "{{ openshift.common.service_type }}-node"
enabled: yes
state: started
+ daemon_reload: yes
register: node_start_result
until: not node_start_result | failed
retries: 1
delay: 30
+ ignore_errors: true
+
+- name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
+
+- name: Abort if node failed to start
+ fail:
+    msg: Node failed to start, please inspect the logs and try again
+ when: node_start_result | failed
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
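The swap handling added above (the grep pre-check plus the Disable Swap Block) is guarded by openshift_disable_swap, which defaults to true. A minimal sketch of opting out for hosts that must keep swap; the group_vars file name is assumed:

    # group_vars/nodes.yml -- illustrative only
    openshift_disable_swap: false   # leave /etc/fstab swap entries and active swap untouched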
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 8cfa5a026..c8d653880 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -10,3 +10,5 @@
name: openvswitch
image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
state: latest
+ values:
+ - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 7d8c42ee2..1b8a7ad50 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,30 +3,52 @@
package: name=glusterfs-fuse state=present
when: not openshift.common.is_atomic | bool
-- name: Check for existence of virt_use_fusefs seboolean
- command: getsebool virt_use_fusefs
- register: virt_use_fusefs_output
- when: ansible_selinux and ansible_selinux.status == "enabled"
+- name: Check for existence of fusefs sebooleans
+ command: getsebool {{ item }}
+ register: fusefs_getsebool_status
+ when:
+ - ansible_selinux
+ - ansible_selinux.status == "enabled"
failed_when: false
changed_when: false
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
- name: Set seboolean to allow gluster storage plugin access from containers
seboolean:
- name: virt_use_fusefs
+ name: "{{ item.item }}"
state: yes
persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled" and virt_use_fusefs_output.rc == 0
+ when:
+ - ansible_selinux
+ - ansible_selinux.status == "enabled"
+ - item.rc == 0
+ # We need to detect whether or not the boolean is an alias, since `seboolean`
+ # will error if it is an alias. We do this by inspecting stdout for the boolean name,
+ # since getsebool prints the resolved name. (At some point Ansible's seboolean module
+ # should learn to deal with aliases)
+ - item.item in item.stdout # Boolean does not have an alias.
+ - ansible_python_version | version_compare('3', '<')
+ with_items: "{{ fusefs_getsebool_status.results }}"
-- name: Check for existence of virt_sandbox_use_fusefs seboolean
- command: getsebool virt_sandbox_use_fusefs
- register: virt_sandbox_use_fusefs_output
- when: ansible_selinux and ansible_selinux.status == "enabled"
- failed_when: false
- changed_when: false
-
-- name: Set seboolean to allow gluster storage plugin access from containers(sandbox)
- seboolean:
- name: virt_sandbox_use_fusefs
- state: yes
- persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_fusefs_output.rc == 0
+# Workaround for https://github.com/openshift/openshift-ansible/issues/4438
+# Use command module rather than seboolean module to set GlusterFS booleans.
+# TODO: Remove this task and the ansible_python_version comparison in
+# the previous task when the problem has been addressed in current
+# ansible release.
+- name: Set seboolean to allow gluster storage plugin access from containers (python 3)
+ command: >
+ setsebool -P {{ item.item }} on
+ when:
+ - ansible_selinux
+ - ansible_selinux.status == "enabled"
+ - item.rc == 0
+ # We need to detect whether or not the boolean is an alias, since `seboolean`
+ # will error if it is an alias. We do this by inspecting stdout for the boolean name,
+ # since getsebool prints the resolved name. (At some point Ansible's seboolean module
+ # should learn to deal with aliases)
+ - item.item in item.stdout # Boolean does not have an alias.
+ - ('--> off' in item.stdout) # Boolean is currently off.
+ - ansible_python_version | version_compare('3', '>=')
+ with_items: "{{ fusefs_getsebool_status.results }}"
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index d40ae66cb..7e1035893 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -3,24 +3,52 @@
package: name=nfs-utils state=present
when: not openshift.common.is_atomic | bool
-- name: Check for existence of seboolean
+- name: Check for existence of nfs sebooleans
command: getsebool {{ item }}
- register: getsebool_status
- when: ansible_selinux and ansible_selinux.status == "enabled"
+ register: nfs_getsebool_status
+ when:
+ - ansible_selinux
+ - ansible_selinux.status == "enabled"
failed_when: false
changed_when: false
with_items:
- - virt_use_nfs
- - virt_sandbox_use_nfs
+ - virt_use_nfs
+ - virt_sandbox_use_nfs
- name: Set seboolean to allow nfs storage plugin access from containers
seboolean:
name: "{{ item.item }}"
state: yes
persistent: yes
+ when:
+ - ansible_selinux
+ - ansible_selinux.status == "enabled"
+ - item.rc == 0
# We need to detect whether or not the boolean is an alias, since `seboolean`
# will error if it is an alias. We do this by inspecting stdout for the boolean name,
# since getsebool prints the resolved name. (At some point Ansible's seboolean module
# should learn to deal with aliases)
- when: ansible_selinux and ansible_selinux.status == "enabled" and item.rc == 0 and item.stdout.find(item.item) != -1
- with_items: "{{ getsebool_status.results }}"
+ - item.item in item.stdout # Boolean does not have an alias.
+ - ansible_python_version | version_compare('3', '<')
+ with_items: "{{ nfs_getsebool_status.results }}"
+
+# Workaround for https://github.com/openshift/openshift-ansible/issues/4438
+# Use command module rather than seboolean module to set NFS booleans.
+# TODO: Remove this task and the ansible_python_version comparison in
+# the previous task when the problem has been addressed in current
+# ansible release.
+- name: Set seboolean to allow nfs storage plugin access from containers (python 3)
+ command: >
+ setsebool -P {{ item.item }} on
+ when:
+ - ansible_selinux
+ - ansible_selinux.status == "enabled"
+ - item.rc == 0
+ # We need to detect whether or not the boolean is an alias, since `seboolean`
+ # will error if it is an alias. We do this by inspecting stdout for the boolean name,
+ # since getsebool prints the resolved name. (At some point Ansible's seboolean module
+ # should learn to deal with aliases)
+ - item.item in item.stdout # Boolean does not have an alias.
+ - ('--> off' in item.stdout) # Boolean is currently off.
+ - ansible_python_version | version_compare('3', '>=')
+ with_items: "{{ nfs_getsebool_status.results }}"
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 52482d09b..2ccc28461 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -8,6 +8,9 @@
src: openshift.docker.node.dep.service
register: install_node_dep_result
when: openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- block:
- name: Pre-pull node image
@@ -21,10 +24,23 @@
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: openshift.docker.node.service
register: install_node_result
+ notify:
+ - reload systemd units
+ - restart node
when:
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "node.service.j2"
+ register: install_node_result
+ when: not openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
+
- name: Create the openvswitch service env file
template:
src: openvswitch.sysconfig.j2
@@ -32,6 +48,7 @@
when: openshift.common.is_containerized | bool
register: install_ovs_sysconfig
notify:
+ - reload systemd units
- restart openvswitch
- name: Install Node system container
@@ -60,6 +77,7 @@
when: openshift.common.use_openshift_sdn | default(true) | bool
register: install_oom_fix_result
notify:
+ - reload systemd units
- restart openvswitch
- block:
@@ -74,6 +92,7 @@
dest: "/etc/systemd/system/openvswitch.service"
src: openvswitch.docker.service
notify:
+ - reload systemd units
- restart openvswitch
when:
- openshift.common.is_containerized | bool
@@ -112,9 +131,3 @@
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed
- notify:
- - restart node
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
new file mode 100644
index 000000000..e12a52c15
--- /dev/null
+++ b/roles/openshift_node/templates/node.service.j2
@@ -0,0 +1,31 @@
+[Unit]
+Description=OpenShift Node
+After={{ openshift.docker.service_name }}.service
+Wants=openvswitch.service
+After=ovsdb-server.service
+After=ovs-vswitchd.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+Requires=dnsmasq.service
+After=dnsmasq.service
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
+Environment=GOTRACEBACK=crash
+ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
+ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
+ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
+ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier={{ openshift.common.service_type }}-node
+Restart=always
+RestartSec=5s
+TimeoutStartSec=300
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
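Once this unit is templated onto a non-containerized host (via the new "Install Node service file" task in systemd_units.yml above), activation is left to the "reload systemd units" and "restart node" handlers. A small illustrative check, not part of the role, that the unit landed and is active:

    - name: Verify the node unit is active (illustrative only)
      command: systemctl is-active {{ openshift.common.service_type }}-node
      register: node_unit_state
      changed_when: false
      failed_when: node_unit_state.rc != 0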
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index f2f929232..351c8c9f6 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,5 +1,9 @@
allowDisabledDocker: false
apiVersion: v1
+{% if openshift.common.version_gte_3_6 %}
+dnsBindAddress: 127.0.0.1:53
+dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
+{% endif %}
dnsDomain: {{ openshift.common.dns_domain }}
{% if 'dns_ip' in openshift.node %}
dnsIP: {{ openshift.node.dns_ip }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
[Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index c42bdb7c3..639b6f6c8 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,28 +1,34 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
{% if openshift.common.use_openshift_sdn %}
-Requires=openvswitch.service
+Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
Wants={{ openshift.common.service_type }}-master.service
Requires={{ openshift.common.service_type }}-node-dep.service
After={{ openshift.common.service_type }}-node-dep.service
+Requires=dnsmasq.service
+After=dnsmasq.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
+ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
+ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
SyslogIdentifier={{ openshift.common.service_type }}-node
Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 1aa826c09..4abe8bcaf 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -6,6 +6,10 @@
- name: restart docker after updating ca trust
systemd:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: not openshift_certificates_redeploy | default(false) | bool
+ register: l_docker_restart_docker_in_cert_result
+ until: not l_docker_restart_docker_in_cert_result | failed
+ retries: 3
+ delay: 30
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 9120915b2..1a775178d 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -103,7 +103,6 @@
register: node_cert_mktemp
changed_when: False
when: node_certs_missing | bool
- delegate_to: localhost
become: no
- name: Create a tarball of the node config directories
@@ -141,10 +140,10 @@
dest: "{{ openshift_node_cert_dir }}"
when: node_certs_missing | bool
-- file: name={{ node_cert_mktemp.stdout }} state=absent
+- name: Delete local temp directory
+ local_action: file path="{{ node_cert_mktemp.stdout }}" state=absent
changed_when: False
when: node_certs_missing | bool
- delegate_to: localhost
become: no
- name: Copy OpenShift CA to system CA trust
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 24798d3d2..4aab8f2e9 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -37,6 +37,8 @@ if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
UPSTREAM_DNS_TMP_SORTED=`mktemp`
CURRENT_UPSTREAM_DNS_SORTED=`mktemp`
NEW_RESOLV_CONF=`mktemp`
+ NEW_NODE_RESOLV_CONF=`mktemp`
+
######################################################################
# couldn't find an existing method to determine if the interface owns the
@@ -45,25 +47,29 @@ if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')
def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}')
if [[ ${DEVICE_IFACE} == ${def_route_int} && \
- -n "${IP4_NAMESERVERS}" ]]; then
+ -n "${IP4_NAMESERVERS}" && \
+ "${IP4_NAMESERVERS}" != "${def_route_ip}" ]]; then
if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
cat << EOF > /etc/dnsmasq.d/origin-dns.conf
no-resolv
domain-needed
server=/cluster.local/172.30.0.1
server=/30.172.in-addr.arpa/172.30.0.1
+enable-dbus
EOF
# New config file, must restart
NEEDS_RESTART=1
fi
######################################################################
- # Generate a new origin dns config file
+ # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
+ # and /etc/origin/node/resolv.conf in their respective formats
for ns in ${IP4_NAMESERVERS}; do
if [[ ! -z $ns ]]; then
- echo "server=${ns}"
+ echo "server=${ns}" >> $UPSTREAM_DNS_TMP
+ echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
fi
- done > $UPSTREAM_DNS_TMP
+ done
# Sort it in case DNS servers arrived in a different order
sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
@@ -72,7 +78,6 @@ EOF
# Compare to the current config file (sorted)
NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
-
if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
# DNS has changed, copy the temp file to the proper location (-Z
# sets default selinux context) and set the restart flag
@@ -80,6 +85,13 @@ EOF
NEEDS_RESTART=1
fi
+ # compare /etc/origin/node/resolv.conf checksum and replace it if different
+  NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF} | awk '{print $1}'`
+  OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf | awk '{print $1}'`
+ if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
+ cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
+ fi
+
if ! `systemctl -q is-active dnsmasq.service`; then
NEEDS_RESTART=1
fi
@@ -89,13 +101,17 @@ EOF
systemctl restart dnsmasq
fi
- # Only if dnsmasq is running properly make it our only nameserver
+ # Only if dnsmasq is running properly make it our only nameserver and place
+ # a watermark on /etc/resolv.conf
if `systemctl -q is-active dnsmasq.service`; then
- sed -e '/^nameserver.*$/d' /etc/resolv.conf > ${NEW_RESOLV_CONF}
- echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
- if ! grep -q '99-origin-dns.sh' ${NEW_RESOLV_CONF}; then
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
fi
+ sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
+ echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
+ if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
+ sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
+ fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
fi
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index 3311f7006..d0221a94b 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -14,6 +14,17 @@
package: name=dnsmasq state=installed
when: not openshift.common.is_atomic | bool
+# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
+# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
+# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
+# newer we can use --server-file option to update the servers dynamically and
+# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
+# triggers a restart of dnsmasq but not a node restart.
+- name: Install node-dnsmasq.conf
+ template:
+ src: node-dnsmasq.conf.j2
+ dest: /etc/origin/node/node-dnsmasq.conf
+
- name: Install dnsmasq configuration
template:
src: origin-dns.conf.j2
diff --git a/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2 b/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2
new file mode 100644
index 000000000..3caa3bd4a
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2
@@ -0,0 +1,2 @@
+server=/in-addr.arpa/127.0.0.1
+server=/{{ openshift.common.dns_domain }}/127.0.0.1
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index f397cbbf1..779b4d2f5 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -1,3 +1,7 @@
no-resolv
domain-needed
-server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
+no-negcache
+max-cache-ttl=1
+enable-dbus
+bind-interfaces
+listen-address={{ ansible_default_ipv4.address }}
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 66bce38ec..4e6229bfb 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -84,10 +84,16 @@ Including an example of how to use your role (for instance, with variables passe
command: >
{{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
+
roles:
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
post_tasks:
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index cb51416d4..f26f5d573 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -1,7 +1,16 @@
---
- name: restart openvswitch
- systemd: name=openvswitch state=restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ systemd:
+ name: openvswitch
+ state: restarted
+ when:
+ - not skip_node_svc_handlers | default(False) | bool
+ - not (ovs_service_status_changed | default(false) | bool)
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ register: l_openshift_node_upgrade_stop_openvswitch_result
+ until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
+ retries: 3
+ delay: 30
notify:
- restart openvswitch pause
@@ -10,5 +19,13 @@
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
- systemd: name={{ openshift.common.service_type }}-node state=restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
+ register: l_openshift_node_upgrade_restart_node_result
+ until: not l_openshift_node_upgrade_restart_node_result | failed
+ retries: 3
+ delay: 30
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (node_service_status_changed | default(false) | bool)
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index cd2f362aa..2a36d8945 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -10,4 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
+- role: lib_utils
- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index e91891ca9..ebe87d6fd 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -6,20 +6,6 @@
# - docker_version
# - skip_docker_restart
-# We need docker service up to remove all the images, but these services will keep
-# trying to re-start and thus re-pull the images we're trying to delete.
-- name: Stop containerized services
- service: name={{ item }} state=stopped
- with_items:
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- - etcd_container
- - openvswitch
- failed_when: false
- when: openshift.common.is_containerized | bool
-
- name: Check Docker image count
shell: "docker images -aq | wc -l"
register: docker_image_count
@@ -40,10 +26,15 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_openshift_node_upgrade_docker_stop_result
+ until: not l_openshift_node_upgrade_docker_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
-- include: restart.yml
- when: not skip_docker_restart | default(False) | bool
+# Starting docker happens back in ../main.yml, where it calls ../restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 6ae8dbc12..f984a04b2 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -9,6 +9,44 @@
# - openshift_release
# tasks file for openshift_node_upgrade
+
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Stop additional containerized services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-master-api"
+ - etcd_container
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Pre-pull node image
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.use_openshift_sdn | default(true) | bool
+
- include: docker/upgrade.yml
vars:
# We will restart Docker ourselves after everything is ready:
@@ -16,7 +54,6 @@
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
- - not openshift.common.is_containerized | bool
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
@@ -67,28 +104,64 @@
state: latest
when: not openshift.common.is_containerized | bool
-- name: Restart openvswitch
- systemd:
- name: openvswitch
- state: started
- when:
- - not openshift.common.is_containerized | bool
-
-# Mandatory Docker restart, ensure all containerized services are running:
-- include: docker/restart.yml
-
- name: Update oreg value
yedit:
src: "{{ openshift.common.config_base }}/node/node-config.yaml"
key: 'imageConfig.format'
- value: "{{ oreg_url }}"
- when: oreg_url is defined
+ value: "{{ oreg_url | default(oreg_url_node) }}"
+ when: oreg_url is defined or oreg_url_node is defined
-- name: Restart rpm node service
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- when: not openshift.common.is_containerized | bool
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+ # Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+ # End Disable Swap Block
+
+- name: Reset selinux context
+ command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux.status == 'enabled'
+
+- name: Apply 3.6 dns config changes
+ yedit:
+ src: /etc/origin/node/node-config.yaml
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - key: "dnsBindAddress"
+ value: "127.0.0.1:53"
+ - key: "dnsRecursiveResolvConf"
+ value: "/etc/origin/node/resolv.conf"
+
+# Restart all services
+- include: restart.yml
- name: Wait for node to be ready
oc_obj:
@@ -97,7 +170,10 @@
name: "{{ openshift.common.hostname | lower }}"
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
- until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+ until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
# Give the node two minutes to come back online.
retries: 24
delay: 5
+
+- include_role:
+ name: openshift_node_dnsmasq
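
The "Apply 3.6 dns config changes" task above edits /etc/origin/node/node-config.yaml with yedit; assuming no other changes to that file, the resulting fragment looks roughly like:

    # /etc/origin/node/node-config.yaml (fragment written by the yedit task above)
    dnsBindAddress: "127.0.0.1:53"
    dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
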
diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 176fc3c0b..f228b6e08 100644
--- a/roles/openshift_node_upgrade/tasks/docker/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -5,14 +5,28 @@
# - openshift.common.hostname
# - openshift.master.api_port
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd to ensure latest unit files
+ command: systemctl daemon-reload
+
- name: Restart docker
- service: name=docker state=restarted
+ service:
+ name: "{{ openshift.docker.service_name }}"
+ state: started
+ register: docker_start_result
+ until: not docker_start_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
role: docker
-- name: Restart containerized services
+- name: Start services
service: name={{ item }} state=started
with_items:
- etcd_container
@@ -22,7 +36,6 @@
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
wait_for:
@@ -30,4 +43,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
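
The daemon-reload NOTE above avoids the systemd module because older versions of the module require a service name; on Ansible 2.4 or newer the same reload can be expressed with the module directly. A minimal sketch, not part of this patch:

    - name: Reload systemd to ensure latest unit files
      systemd:
        daemon_reload: yes
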
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
index 480e87d58..a998acf21 100644
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
@@ -12,3 +12,18 @@
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "node.service.j2"
+ register: l_node_unit
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index 862cd19c4..e8f017445 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -18,21 +18,6 @@
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
-
-- name: Pre-pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull openvswitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
-
- name: Install Node dependencies docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2
new file mode 100644
index 000000000..e12a52c15
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/node.service.j2
@@ -0,0 +1,31 @@
+[Unit]
+Description=OpenShift Node
+After={{ openshift.docker.service_name }}.service
+Wants=openvswitch.service
+After=ovsdb-server.service
+After=ovs-vswitchd.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+Requires=dnsmasq.service
+After=dnsmasq.service
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
+Environment=GOTRACEBACK=crash
+ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
+ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
+ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
+ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier={{ openshift.common.service_type }}-node
+Restart=always
+RestartSec=5s
+TimeoutStartSec=300
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
[Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 0ff398152..639b6f6c8 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -1,26 +1,34 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
{% if openshift.common.use_openshift_sdn %}
-Requires=openvswitch.service
+Wants=openvswitch.service
+After=ovsdb-server.service
+After=ovs-vswitchd.service
{% endif %}
Wants={{ openshift.common.service_type }}-master.service
Requires={{ openshift.common.service_type }}-node-dep.service
After={{ openshift.common.service_type }}-node-dep.service
+Requires=dnsmasq.service
+After=dnsmasq.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
+ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
+ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
SyslogIdentifier={{ openshift.common.service_type }}-node
Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 877e88002..9c5103597 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -7,6 +7,12 @@ items:
kind: PersistentVolume
metadata:
name: "{{ volume.name }}"
+{% if volume.labels is defined and volume.labels is mapping %}
+ labels:
+{% for key,value in volume.labels.iteritems() %}
+ {{ key }}: {{ value }}
+{% endfor %}
+{% endif %}
spec:
capacity:
storage: "{{ volume.capacity }}"
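
The labels block added above only renders when a volume dict provides a labels mapping; a hypothetical list entry that would exercise it (names and values are illustrative, not defined by this patch):

    - name: registry-volume
      capacity: 5Gi
      labels:
        storage-tier: gold
        region: infra
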
diff --git a/roles/openshift_provisioners/README.md b/roles/openshift_provisioners/README.md
new file mode 100644
index 000000000..7449073e6
--- /dev/null
+++ b/roles/openshift_provisioners/README.md
@@ -0,0 +1,29 @@
+# OpenShift External Dynamic Provisioners
+
+## Required Vars
+* `openshift_provisioners_install_provisioners`: When `True`, the openshift_provisioners role will install provisioners that have their "master" var (e.g. `openshift_provisioners_efs`) set `True`. When `False`, it will uninstall provisioners that have their var set `True`.
+
+## Optional Vars
+* `openshift_provisioners_image_prefix`: The prefix for the provisioner images to use. Defaults to 'docker.io/openshift/origin-'.
+* `openshift_provisioners_image_version`: The image version for the provisioner images to use. Defaults to 'latest'.
+* `openshift_provisioners_project`: The namespace that provisioners will be installed in. Defaults to 'openshift-infra'.
+
+## AWS EFS
+
+### Prerequisites
+* An IAM user assigned the AmazonElasticFileSystemReadOnlyAccess policy (or better)
+* An EFS file system in your cluster's region
+* [Mount targets](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) and [security groups](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html) such that any node (in any zone in the cluster's region) can mount the EFS file system by its [File system DNS name](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html)
+
+### Required Vars
+* `openshift_provisioners_efs_fsid`: The [File system ID](http://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) of the EFS file system, e.g. fs-47a2c22e.
+* `openshift_provisioners_efs_region`: The Amazon EC2 region of the EFS file system.
+* `openshift_provisioners_efs_aws_access_key_id`: The AWS access key of the IAM user, used to check that the EFS file system specified actually exists.
+* `openshift_provisioners_efs_aws_secret_access_key`: The AWS secret access key of the IAM user, used to check that the EFS file system specified actually exists.
+
+### Optional Vars
+* `openshift_provisioners_efs`: When `True` the AWS EFS provisioner will be installed or uninstalled according to whether `openshift_provisioners_install_provisioners` is `True` or `False`, respectively. Defaults to `False`.
+* `openshift_provisioners_efs_path`: The path of the directory in the EFS file system in which the EFS provisioner will create a directory to back each PV it creates. It must exist and be mountable by the EFS provisioner. Defaults to '/persistentvolumes'.
+* `openshift_provisioners_efs_name`: The `provisioner` name that `StorageClasses` specify. Defaults to 'openshift.org/aws-efs'.
+* `openshift_provisioners_efs_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
+* `openshift_provisioners_efs_supplementalgroup`: The supplemental group to give the pod in case it is needed for permission to write to the EFS file system. Defaults to '65534'.
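
As a quick reference, a minimal sketch of the variables described above, expressed as group_vars-style YAML (the file system ID is the documented example; the key values are placeholders):

    openshift_provisioners_install_provisioners: True
    openshift_provisioners_efs: True
    openshift_provisioners_efs_fsid: fs-47a2c22e
    openshift_provisioners_efs_region: us-east-1
    openshift_provisioners_efs_aws_access_key_id: AKIA-EXAMPLE-KEY-ID        # placeholder
    openshift_provisioners_efs_aws_secret_access_key: example-secret-key     # placeholder
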
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
new file mode 100644
index 000000000..a6f040831
--- /dev/null
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -0,0 +1,12 @@
+---
+openshift_provisioners_install_provisioners: True
+openshift_provisioners_image_prefix: docker.io/openshift/origin-
+openshift_provisioners_image_version: latest
+
+openshift_provisioners_efs: False
+openshift_provisioners_efs_path: /persistentvolumes
+openshift_provisioners_efs_name: openshift.org/aws-efs
+openshift_provisioners_efs_nodeselector: ""
+openshift_provisioners_efs_supplementalgroup: '65534'
+
+openshift_provisioners_project: openshift-infra
diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml
new file mode 100644
index 000000000..cb9278eb7
--- /dev/null
+++ b/roles/openshift_provisioners/meta/main.yaml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Provisioners
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..ac21a5e37
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,19 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: provisioners-{{item}}
+ obj_name: run-provisioners-{{item}}
+ labels:
+ provisioners-infra: support
+ crb_usernames: ["system:serviceaccount:{{openshift_provisioners_project}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_provisioners_project}}"
+ cr_name: "system:persistent-volume-provisioner"
+ with_items:
+ # TODO
+ - efs
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..e6cbb1bbf
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: Generate secret for efs
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml
+ vars:
+ name: efs
+ obj_name: "provisioners-efs"
+ labels:
+ provisioners-infra: support
+ secrets:
+ - {key: aws-access-key-id, value: "{{openshift_provisioners_efs_aws_access_key_id}}"}
+ - {key: aws-secret-access-key, value: "{{openshift_provisioners_efs_aws_secret_access_key}}"}
+ check_mode: no
+ changed_when: no
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..4fe0583ee
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml
+ vars:
+ obj_name: provisioners-{{item}}
+ labels:
+ provisioners-infra: support
+ with_items:
+ # TODO
+ - efs
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
new file mode 100644
index 000000000..b53b6afa1
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -0,0 +1,70 @@
+---
+- name: Check efs current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+ -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}
+ register: efs_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generate efs PersistentVolumeClaim
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "provisioners-efs"
+ size: "1Mi"
+ access_modes:
+ - "ReadWriteMany"
+ pv_selector:
+ provisioners-efs: efs
+ check_mode: no
+ changed_when: no
+
+- name: Generate efs PersistentVolume
+ template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml
+ vars:
+ obj_name: "provisioners-efs"
+ size: "1Mi"
+ access_modes:
+ - "ReadWriteMany"
+ labels:
+ provisioners-efs: efs
+ volume_plugin: "nfs"
+ volume_source:
+ - {key: "server", value: "{{openshift_provisioners_efs_fsid}}.efs.{{openshift_provisioners_efs_region}}.amazonaws.com"}
+ - {key: "path", value: "{{openshift_provisioners_efs_path}}"}
+ claim_name: "provisioners-efs"
+ check_mode: no
+ changed_when: no
+
+- name: Generate efs DeploymentConfig
+ template:
+ src: efs.j2
+ dest: "{{ mktemp.stdout }}/templates/{{deploy_name}}-dc.yaml"
+ vars:
+ name: efs
+ deploy_name: "provisioners-efs"
+ deploy_serviceAccount: "provisioners-efs"
+ replica_count: "{{efs_replica_count.stdout | default(0)}}"
+ node_selector: "{{openshift_provisioners_efs_nodeselector | default('') }}"
+ claim_name: "provisioners-efs"
+ check_mode: no
+ changed_when: false
+
+# anyuid is needed in order to run as root & chgrp shares with allocated gids
+- name: "Check efs anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/anyuid -o jsonpath='{.users}'
+ register: efs_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set anyuid permissions for efs"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
+ register: efs_output
+ failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
+ check_mode: no
+ when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
new file mode 100644
index 000000000..324fdcc82
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -0,0 +1,55 @@
+---
+- name: Check that EFS File System ID is set
+ fail: msg='the openshift_provisioners_efs_fsid variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_fsid is not defined
+
+- name: Check that EFS region is set
+ fail: msg='the openshift_provisioners_efs_region variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_region is not defined
+
+- name: Check that EFS AWS access key id is set
+ fail: msg='the openshift_provisioners_efs_aws_access_key_id variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_access_key_id is not defined
+
+- name: Check that EFS AWS secret access key is set
+ fail: msg='the openshift_provisioners_efs_aws_secret_access_key variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
+
+- name: Install support
+ include: install_support.yaml
+
+- name: Install EFS
+ include: install_efs.yaml
+ when: openshift_provisioners_efs | bool
+
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item}}
+ register: object_defs
+ with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+ changed_when: no
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_provisioners_project }}"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg={{file.content | b64decode }}
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
new file mode 100644
index 000000000..ba472f1c9
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -0,0 +1,24 @@
+---
+- name: Check for provisioners project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers
+ register: provisioners_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Create provisioners project
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}}
+ when: not ansible_check_mode and "not found" in provisioners_project_result.stderr
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
new file mode 100644
index 000000000..a50c78c97
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -td openshift-provisioners-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: provisioners_init
+
+- include: "{{ role_path }}/tasks/install_provisioners.yaml"
+ when: openshift_provisioners_install_provisioners | default(false) | bool
+
+- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml"
+ when: not openshift_provisioners_install_provisioners | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: provisioners_cleanup
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index cb9509de1..49d03f203 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -1,8 +1,7 @@
---
- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
@@ -39,7 +38,7 @@
register: generation_delete
failed_when: "'error' in generation_delete.stderr"
changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ when: generation_apply.rc != 0
- name: Recreating {{file_name}}
command: >
@@ -49,4 +48,4 @@
register: generation_apply
failed_when: "'error' in generation_apply.stderr"
changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ when: generation_apply.rc != 0
diff --git a/roles/openshift_provisioners/tasks/start_cluster.yaml b/roles/openshift_provisioners/tasks/start_cluster.yaml
new file mode 100644
index 000000000..ee7f545a9
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/start_cluster.yaml
@@ -0,0 +1,20 @@
+---
+- name: Retrieve efs
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "provisioners-infra=efs"
+ namespace: "{{openshift_provisioners_project}}"
+ register: efs_dc
+ when: openshift_provisioners_efs | bool
+
+- name: start efs
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_provisioners_project}}"
+ replicas: 1
+ with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/stop_cluster.yaml b/roles/openshift_provisioners/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..30b6b12c8
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/stop_cluster.yaml
@@ -0,0 +1,20 @@
+---
+- name: Retrieve efs
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "provisioners-infra=efs"
+ namespace: "{{openshift_provisioners_project}}"
+ register: efs_dc
+ when: openshift_provisioners_efs | bool
+
+- name: stop efs
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_provisioners_project}}"
+ replicas: 0
+ with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
new file mode 100644
index 000000000..0be4bc7d2
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -0,0 +1,43 @@
+---
+- name: stop provisioners
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete provisioner api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - dc
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our old secrets
+- name: delete provisioner secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - provisioners-efs
+ ignore_errors: yes
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - run-provisioners-efs
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our service accounts
+- name: delete service accounts
+ oc_serviceaccount:
+ name: "{{ item }}"
+ namespace: "{{ openshift_provisioners_project }}"
+ state: absent
+ with_items:
+ - provisioners-efs
diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_provisioners/templates/clusterrolebinding.j2
index 2d25ff1fb..994afa32d 100644
--- a/roles/openshift_logging/templates/clusterrolebinding.j2
+++ b/roles/openshift_provisioners/templates/clusterrolebinding.j2
@@ -2,6 +2,12 @@ apiVersion: v1
kind: ClusterRoleBinding
metadata:
name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
{% if crb_usernames is defined %}
userNames:
{% for name in crb_usernames %}
@@ -21,4 +27,4 @@ subjects:
namespace: {{sub.namespace}}
{% endfor %}
roleRef:
- name: {{obj_name}}
+ name: {{cr_name}}
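
With the variables set in generate_clusterrolebindings.yaml earlier in this patch (acct_name provisioners-efs, cr_name system:persistent-volume-provisioner) and the default project openshift-infra, the template renders roughly as:

    apiVersion: v1
    kind: ClusterRoleBinding
    metadata:
      name: run-provisioners-efs
      labels:
        provisioners-infra: support
    userNames:
    - system:serviceaccount:openshift-infra:provisioners-efs
    subjects:
    - kind: ServiceAccount
      name: provisioners-efs
      namespace: openshift-infra
    roleRef:
      name: system:persistent-volume-provisioner
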
diff --git a/roles/openshift_provisioners/templates/efs.j2 b/roles/openshift_provisioners/templates/efs.j2
new file mode 100644
index 000000000..81b9ccca5
--- /dev/null
+++ b/roles/openshift_provisioners/templates/efs.j2
@@ -0,0 +1,58 @@
+kind: DeploymentConfig
+apiVersion: v1
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+spec:
+ replicas: {{replica_count}}
+ selector:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+ spec:
+ serviceAccountName: "{{deploy_serviceAccount}}"
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: efs-provisioner
+ image: {{openshift_provisioners_image_prefix}}efs-provisioner:{{openshift_provisioners_image_version}}
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: provisioners-efs
+ key: aws-access-key-id
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: provisioners-efs
+ key: aws-secret-access-key
+ - name: FILE_SYSTEM_ID
+ value: "{{openshift_provisioners_efs_fsid}}"
+ - name: AWS_REGION
+ value: "{{openshift_provisioners_efs_region}}"
+ - name: PROVISIONER_NAME
+ value: "{{openshift_provisioners_efs_name}}"
+ volumeMounts:
+ - name: pv-volume
+ mountPath: /persistentvolumes
+ securityContext:
+ supplementalGroups:
+ - {{openshift_provisioners_efs_supplementalgroup}}
+ volumes:
+ - name: pv-volume
+ persistentVolumeClaim:
+ claimName: "{{claim_name}}"
diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2
new file mode 100644
index 000000000..f4128f9f0
--- /dev/null
+++ b/roles/openshift_provisioners/templates/pv.j2
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{obj_name}}
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ capacity:
+ storage: {{size}}
+ accessModes:
+{% for mode in access_modes %}
+ - {{mode}}
+{% endfor %}
+ {{volume_plugin}}:
+{% for s in volume_source %}
+ {{s.key}}: {{s.value}}
+{% endfor %}
+{% if claim_name is defined%}
+ claimRef:
+ name: {{claim_name}}
+ namespace: {{openshift_provisioners_project}}
+{% endif %}
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2
index 07d81afff..83d503056 100644
--- a/roles/openshift_logging/templates/pvc.j2
+++ b/roles/openshift_provisioners/templates/pvc.j2
@@ -1,9 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: "{{obj_name}}"
- labels:
- logging-infra: support
+ name: {{obj_name}}
{% if annotations is defined %}
annotations:
{% for key,value in annotations.iteritems() %}
@@ -20,8 +18,9 @@ spec:
{% endif %}
accessModes:
{% for mode in access_modes %}
- - {{ mode }}
+ - {{mode}}
{% endfor %}
resources:
requests:
storage: {{size}}
+
diff --git a/roles/openshift_provisioners/templates/secret.j2 b/roles/openshift_provisioners/templates/secret.j2
new file mode 100644
index 000000000..78824095b
--- /dev/null
+++ b/roles/openshift_provisioners/templates/secret.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+type: Opaque
+data:
+{% for s in secrets %}
+ "{{s.key}}" : "{{s.value | b64encode}}"
+{% endfor %}
diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_provisioners/templates/serviceaccount.j2
index b22acc594..b22acc594 100644
--- a/roles/openshift_logging/templates/serviceaccount.j2
+++ b/roles/openshift_provisioners/templates/serviceaccount.j2
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 95b155b29..abd1997dd 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|---------------|----------------------------------------------|
-| openshift_deployment_type | None | Possible values enterprise, origin, online |
-| openshift_additional_repos | {} | TODO |
+| Name | Default value | |
+|-------------------------------|---------------|------------------------------------|
+| openshift_deployment_type | None | Possible values enterprise, origin |
+| openshift_additional_repos | {} | TODO |
Dependencies
------------
diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
index 7c5a14cd7..44f34ea7b 100644
--- a/roles/openshift_repos/defaults/main.yaml
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -1,2 +1,3 @@
---
openshift_additional_repos: {}
+openshift_repos_enable_testing: false
diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
index 124bff09d..09364c26f 100644
--- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
+++ b/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
@@ -3,7 +3,7 @@ name=CentOS OpenShift Origin
baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin/
enabled=1
gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
[centos-openshift-origin-testing]
name=CentOS OpenShift Origin Testing
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 84a0905cc..7458db87e 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -4,7 +4,8 @@
path: /run/ostree-booted
register: ostree_booted
-- block:
+- when: not ostree_booted.stat.exists
+ block:
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
@@ -24,20 +25,44 @@
- openshift_additional_repos | length == 0
notify: refresh cache
- - name: Configure origin gpg keys if needed
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/
- - src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/
- notify: refresh cache
- when:
- - ansible_os_family == "RedHat"
- - ansible_distribution != "Fedora"
- - openshift_deployment_type == 'origin'
- - openshift_enable_origin_repo | default(true) | bool
+ # Singleton block
+ - when: r_openshift_repos_has_run is not defined
+ block:
+
+ # Note: OpenShift repositories under CentOS may be shipped through the
+ # "centos-release-openshift-origin" package which configures the repository.
+ # This task matches the file names provided by the package so that they are
+ # not installed twice in different files and the configuration remains idempotent.
+ - name: Configure origin repositories and gpg keys if needed
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
+ dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+ - src: origin/repos/openshift-ansible-centos-paas-sig.repo
+ dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
+ notify: refresh cache
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution != "Fedora"
+ - openshift_deployment_type == 'origin'
+ - openshift_enable_origin_repo | default(true) | bool
+
+ - name: Enable centos-openshift-origin-testing repository
+ command: yum-config-manager --enable centos-openshift-origin-testing
+ when: openshift_repos_enable_testing | bool
+
+ - name: Ensure clean repo cache in the event repos have been changed manually
+ debug:
+ msg: "First run of openshift_repos"
+ changed_when: true
+ notify: refresh cache
+
+ - name: Record that openshift_repos already ran
+ set_fact:
+ r_openshift_repos_has_run: True
- when: not ostree_booted.stat.exists
+ # Force running ALL handlers now, because we expect repo cache to be cleared
+ # if changes have been made.
+ - meta: flush_handlers
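
The new openshift_repos_enable_testing toggle (defaulted to false earlier in this patch) is meant to be flipped from the inventory; when true, the block above simply runs yum-config-manager --enable centos-openshift-origin-testing. A minimal sketch:

    # inventory / group_vars (sketch)
    openshift_repos_enable_testing: true
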
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index fc562c42c..59ce505d3 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,6 +1,18 @@
---
+- name: Abort when conflicting deployment type variables are set
+ when:
+ - deployment_type is defined
+ - openshift_deployment_type is defined
+ - openshift_deployment_type != deployment_type
+ fail:
+ msg: |-
+ openshift_deployment_type is set to "{{ openshift_deployment_type }}".
+ deployment_type is set to "{{ deployment_type }}".
+ To avoid unexpected results, this conflict is not allowed.
+ deployment_type is deprecated in favor of openshift_deployment_type.
+ Please specify only openshift_deployment_type, or make both the same.
+
- name: Standardize on latest variable names
- no_log: True # keep task description legible
set_fact:
# goal is to deprecate deployment_type in favor of openshift_deployment_type.
# both will be accepted for now, but code should refer to the new name.
@@ -8,8 +20,15 @@
deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+- name: Abort when deployment type is invalid
+ # this variable is required; complain early and clearly if it is invalid.
+ when: openshift_deployment_type not in known_openshift_deployment_types
+ fail:
+ msg: |-
+ Please set openshift_deployment_type to one of:
+ {{ known_openshift_deployment_types | join(', ') }}
+
- name: Normalize openshift_release
- no_log: True # keep task description legible
set_fact:
# Normalize release if provided, e.g. "v3.5" => "3.5"
# Currently this is not required to be defined for all installs, and the
@@ -19,10 +38,15 @@
openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}"
when: openshift_release is defined
-- name: Ensure a valid deployment type has been given.
- # this variable is required; complain early and clearly if it is invalid.
- when: openshift_deployment_type not in known_openshift_deployment_types
+- name: Abort when openshift_release is invalid
+ when:
+ - openshift_release is defined
+ - not openshift_release | match('\d+(\.\d+){1,3}$')
fail:
msg: |-
- Please set openshift_deployment_type to one of:
- {{ known_openshift_deployment_types | join(', ') }}
+ openshift_release is "{{ openshift_release }}" which is not a valid version string.
+ Please set it to a version string like "3.4".
+
+- include: unsupported.yml
+ when:
+ - not openshift_enable_unsupported_configurations | default(false) | bool
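
For reference, a minimal set of inventory values that passes the checks above (the release number is only an example):

    openshift_deployment_type: origin
    openshift_release: "3.6"
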
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
new file mode 100644
index 000000000..24e44ea85
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -0,0 +1,12 @@
+---
+# This task list checks for unsupported configurations. Values here should yield
+# a partially functioning cluster but would not be supported for production use.
+
+- name: Ensure that openshift_use_dnsmasq is true
+ when:
+ - not openshift_use_dnsmasq | default(true) | bool
+ fail:
+ msg: |-
+ Starting in 3.6, openshift_use_dnsmasq must be true or critical features
+ will not function. This also means that NetworkManager must be installed,
+ enabled, and responsible for management of the primary interface.
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
new file mode 100644
index 000000000..01ee2544d
--- /dev/null
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_service_catalog_remove: false
+openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
new file mode 100644
index 000000000..71e21a269
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -0,0 +1,199 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: service-catalog
+objects:
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer
+ rules:
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - serviceclasses
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer-binding
+ roleRef:
+ name: servicecatalog-serviceclass-viewer
+ groupNames:
+ - system:authenticated
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: sar-creator
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - subjectaccessreviews.authorization.k8s.io
+ verbs:
+ - create
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-sar-creator-binding
+ roleRef:
+ name: sar-creator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: namespace-viewer
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - brokers/status
+ - instances/status
+ - bindings/status
+ verbs:
+ - update
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - brokers
+ - instances
+ - bindings
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - patch
+ - create
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - serviceclasses
+ verbs:
+ - create
+ - delete
+ - update
+ - patch
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - settings.k8s.io
+ resources:
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-binding
+ roleRef:
+ name: service-catalog-controller
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - list
+ - watch
+ - get
+ - create
+ - update
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor-binding
+ roleRef:
+ name: endpoint-accessor
+ namespace: kube-service-catalog
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: system:auth-delegator-binding
+ roleRef:
+ name: system:auth-delegator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
new file mode 100644
index 000000000..f6ee0955d
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: kube-system-service-catalog
+objects:
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - extension-apiserver-authentication
+ resources:
+ - configmaps
+ verbs:
+ - get
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader-binding
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ roleRef:
+ name: extension-apiserver-authentication-reader
+ namespace: kube-system
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+parameters:
+- description: Do not change this value.
+ displayName: Name of the kube-system namespace
+ name: KUBE_SYSTEM_NAMESPACE
+ required: true
+ value: kube-system
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..1f25cc39f
--- /dev/null
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
diff --git a/roles/openshift_service_catalog/meta/main.yml b/roles/openshift_service_catalog/meta/main.yml
new file mode 100644
index 000000000..1e6b837cd
--- /dev/null
+++ b/roles/openshift_service_catalog/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Service Catalog
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
new file mode 100644
index 000000000..cc897b032
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -0,0 +1,70 @@
+---
+- name: Create service catalog cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/service-catalog"
+ state: directory
+ mode: 0755
+ changed_when: False
+ check_mode: no
+
+- set_fact:
+ generated_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+
+- name: Generate signing cert
+ command: >
+ {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
+ --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
+ --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
+
+- name: Generating server keys
+ oc_adm_ca_server_cert:
+ cert: "{{ generated_certs_dir }}/apiserver.crt"
+ key: "{{ generated_certs_dir }}/apiserver.key"
+ hostnames: "apiserver.kube-service-catalog.svc,apiserver.kube-service-catalog.svc.cluster.local,apiserver.kube-service-catalog"
+ signer_cert: "{{ generated_certs_dir }}/ca.crt"
+ signer_key: "{{ generated_certs_dir }}/ca.key"
+ signer_serial: "{{ generated_certs_dir }}/apiserver.serial.txt"
+
+- name: Create apiserver-ssl secret
+ oc_secret:
+ state: present
+ name: apiserver-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+ - name: tls.key
+ path: "{{ generated_certs_dir }}/apiserver.key"
+
+- slurp:
+ src: "{{ generated_certs_dir }}/ca.crt"
+ register: apiserver_ca
+
+- shell: >
+ oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ register: get_apiservices
+ changed_when: no
+
+- name: Create api service
+ oc_obj:
+ state: present
+ name: v1alpha1.servicecatalog.k8s.io
+ kind: apiservices.apiregistration.k8s.io
+ namespace: "kube-service-catalog"
+ content:
+ path: /tmp/apisvcout
+ data:
+ apiVersion: apiregistration.k8s.io/v1beta1
+ kind: APIService
+ metadata:
+ name: v1alpha1.servicecatalog.k8s.io
+ spec:
+ group: servicecatalog.k8s.io
+ version: v1alpha1
+ service:
+ namespace: "kube-service-catalog"
+ name: apiserver
+ caBundle: "{{ apiserver_ca.content }}"
+ groupPriorityMinimum: 20
+ versionPriority: 10
+ when: "'not found' in get_apiservices.stdout"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
new file mode 100644
index 000000000..686857d94
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -0,0 +1,239 @@
+---
+# do any asserts here
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- include: wire_aggregator.yml
+
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set service_catalog image facts
+ set_fact:
+ openshift_service_catalog_image_prefix: "{{ openshift_service_catalog_image_prefix | default(__openshift_service_catalog_image_prefix) }}"
+ openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(__openshift_service_catalog_image_version) }}"
+
+- name: Set Service Catalog namespace
+ oc_project:
+ state: present
+ name: "kube-service-catalog"
+ node_selector: ""
+
+- name: Make kube-service-catalog project network global
+ command: >
+ oc adm pod-network make-projects-global kube-service-catalog
+ when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant'
+
+- include: generate_certs.yml
+
+- copy:
+ src: kubeservicecatalog_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+
+- oc_obj:
+ name: service-catalog
+ kind: template
+ namespace: "kube-service-catalog"
+ files:
+ - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: service-catalog
+ namespace: "kube-service-catalog"
+
+- copy:
+ src: kubesystem_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+
+- oc_obj:
+ name: kube-system-service-catalog
+ kind: template
+ namespace: kube-system
+ files:
+ - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: kube-system-service-catalog
+ namespace: kube-system
+
+- oc_obj:
+ name: edit
+ kind: clusterrole
+ state: list
+ register: edit_yaml
+
+# only do this if we don't already have the updated role info
+- name: Generate apply template for clusterrole/edit
+ template:
+ src: sc_role_patching.j2
+ dest: "{{ mktemp.stdout }}/edit_sc_patch.yml"
+ vars:
+ original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
+ when:
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+# only do this if we don't already have the updated role info
+- name: update edit role for service catalog and pod preset access
+ command: >
+ oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+ when:
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+- oc_obj:
+ name: admin
+ kind: clusterrole
+ state: list
+ register: admin_yaml
+
+# only do this if we don't already have the updated role info
+- name: Generate apply template for clusterrole/admin
+ template:
+ src: sc_role_patching.j2
+ dest: "{{ mktemp.stdout }}/admin_sc_patch.yml"
+ vars:
+ original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
+ when:
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+# only do this if we don't already have the updated role info
+- name: update admin role for service catalog and pod preset access
+ command: >
+ oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+ when:
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+- shell: >
+ oc get policybindings/kube-system:default -n kube-system || echo "not found"
+ register: get_kube_system
+ changed_when: no
+
+- command: >
+ oc create policybinding kube-system -n kube-system
+ when: "'not found' in get_kube_system.stdout"
+
+- oc_adm_policy_user:
+ namespace: kube-service-catalog
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:kube-service-catalog:service-catalog-apiserver"
+
+- name: Set SA cluster-role
+ oc_adm_policy_user:
+ state: present
+ namespace: "kube-service-catalog"
+ resource_kind: cluster-role
+ resource_name: admin
+ user: "system:serviceaccount:kube-service-catalog:default"
+
+- name: Checking for master.etcd-ca.crt
+ stat:
+ path: /etc/origin/master/master.etcd-ca.crt
+ register: etcd_ca_crt
+ check_mode: no
+
+## api server
+- template:
+ src: api_server.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ vars:
+ image: ""
+ namespace: ""
+ cpu_limit: none
+ memory_limit: none
+ cpu_requests: none
+ memory_request: none
+ cors_allowed_origin: localhost
+ etcd_servers: "{{ openshift.master.etcd_urls | join(',') }}"
+ etcd_cafile: "{{ '/etc/origin/master/master.etcd-ca.crt' if etcd_ca_crt.stat.exists else '/etc/origin/master/ca-bundle.crt' }}"
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Service Catalog API Server daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_service.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+
+- name: Set Service Catalog API Server service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_route.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+
+- name: Set Service Catalog API Server route
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+ delete_after: yes
+
+## controller manager
+- template:
+ src: controller_manager.j2
+ dest: "{{ mktemp.stdout }}/controller_manager.yml"
+ vars:
+ image: ""
+ cpu_limit: none
+ memory_limit: none
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Controller Manager deployment
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager.yml"
+ delete_after: yes
+
+- template:
+ src: controller_manager_service.j2
+ dest: "{{ mktemp.stdout }}/controller_manager_service.yml"
+
+- name: Set Controller Manager service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager_service.yml"
+ delete_after: yes
+
+- include: start_api_server.yml
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml
new file mode 100644
index 000000000..dc0d6a370
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not openshift_service_catalog_remove | default(false) | bool
+
+- include: remove.yml
+ when: openshift_service_catalog_remove | default(false) | bool
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
new file mode 100644
index 000000000..2fb1ec440
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -0,0 +1,56 @@
+---
+- name: Remove Service Catalog APIServer
+ command: >
+ oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+
+- name: Remove Policy Binding
+ command: >
+ oc delete policybindings/kube-system:default -n kube-system --ignore-not-found
+
+# TODO: this module doesn't currently remove this
+#- name: Remove service catalog api service
+# oc_obj:
+# state: absent
+# namespace: "kube-service-catalog"
+# kind: apiservices.apiregistration.k8s.io
+# name: v1alpha1.servicecatalog.k8s.io
+
+- name: Remove Service Catalog API Server route
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+
+- name: Remove Service Catalog API Server service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+
+- name: Remove Service Catalog API Server daemonset
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+
+- name: Remove Controller Manager service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+
+- name: Remove Controller Manager deployment
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: deployment
+ name: controller-manager
+
+- name: Remove Service Catalog namespace
+ oc_project:
+ state: absent
+ name: "kube-service-catalog"
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
new file mode 100644
index 000000000..b143292b6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -0,0 +1,22 @@
+---
+# Label nodes and wait for apiserver and controller to be running (at least one)
+- name: Label {{ openshift.node.nodename }} for APIServer and controller deployment
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}"
+
+# wait to see that the apiserver is available
+- name: wait for api server to be ready
+ command: >
+ curl -k https://apiserver.kube-service-catalog.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_health
+ until: api_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..d5291a99a
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -0,0 +1,206 @@
+---
+- name: Make temp cert dir
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: certtemp
+ changed_when: False
+
+- name: Check for First Master Aggregator Signer cert
+ stat:
+ path: /etc/origin/master/front-proxy-ca.crt
+ register: first_proxy_ca_crt
+ changed_when: false
+ delegate_to: "{{ first_master }}"
+
+- name: Check for First Master Aggregator Signer key
+ stat:
+ path: /etc/origin/master/front-proxy-ca.key
+ register: first_proxy_ca_key
+ changed_when: false
+ delegate_to: "{{ first_master }}"
+
+
+# TODO: this currently has a bug where hostnames are required
+- name: Creating First Master Aggregator signer certs
+ command: >
+ oc adm ca create-signer-cert
+ --cert=/etc/origin/master/front-proxy-ca.crt
+ --key=/etc/origin/master/front-proxy-ca.key
+ --serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ first_master }}"
+ when:
+ - not first_proxy_ca_crt.stat.exists
+ - not first_proxy_ca_key.stat.exists
+
+- name: Check for Aggregator Signer cert
+ stat:
+ path: /etc/origin/master/front-proxy-ca.crt
+ register: proxy_ca_crt
+ changed_when: false
+
+- name: Check for Aggregator Signer key
+ stat:
+ path: /etc/origin/master/front-proxy-ca.key
+ register: proxy_ca_key
+ changed_when: false
+
+- name: Copy Aggregator Signer certs from first master
+ fetch:
+ src: "/etc/origin/master/{{ item }}"
+ dest: "{{ certtemp.stdout }}/{{ item }}"
+ flat: yes
+ with_items:
+ - front-proxy-ca.crt
+ - front-proxy-ca.key
+ delegate_to: "{{ first_master }}"
+ when:
+ - not proxy_ca_key.stat.exists
+ - not proxy_ca_crt.stat.exists
+
+- name: Copy Aggregator Signer certs to host
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/{{ item }}"
+ with_items:
+ - front-proxy-ca.crt
+ - front-proxy-ca.key
+ when:
+ - not proxy_ca_key.stat.exists
+ - not proxy_ca_crt.stat.exists
+
+# oc_adm_ca_server_cert:
+# cert: /etc/origin/master/front-proxy-ca.crt
+# key: /etc/origin/master/front-proxy-ca.key
+
+- name: Check for first master api-client config
+ stat:
+ path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+ register: first_front_proxy_kubeconfig
+ delegate_to: "{{ first_master }}"
+
+- name: Create first master api-client config for Aggregator
+ command: >
+ oc adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir=/etc/origin/master
+ --signer-serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ first_master }}"
+ when:
+ - not first_front_proxy_kubeconfig.stat.exists
+
+- name: Check for api-client config
+ stat:
+ path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+ register: front_proxy_kubeconfig
+
+- name: Copy api-client config from first master
+ fetch:
+ src: "/etc/origin/master/{{ item }}"
+ dest: "{{ certtemp.stdout }}/{{ item }}"
+ flat: yes
+ delegate_to: "{{ first_master }}"
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ when:
+ - not front_proxy_kubeconfig.stat.exists
+
+- name: Copy api-client config to host
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/{{ item }}"
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ when:
+ - not front_proxy_kubeconfig.stat.exists
+
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: aggregatorConfig.proxyClientInfo.certFile
+ value: aggregator-front-proxy.crt
+ - key: aggregatorConfig.proxyClientInfo.keyFile
+ value: aggregator-front-proxy.key
+ - key: authConfig.requestHeader.clientCA
+ value: front-proxy-ca.crt
+ - key: authConfig.requestHeader.clientCommonNames
+ value: [aggregator-front-proxy]
+ - key: authConfig.requestHeader.usernameHeaders
+ value: [X-Remote-User]
+ - key: authConfig.requestHeader.groupHeaders
+ value: [X-Remote-Group]
+ - key: authConfig.requestHeader.extraHeaderPrefixes
+ value: [X-Remote-Extra-]
+ - key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ - key: kubernetesMasterConfig.apiServerArguments.runtime-config
+ value: [apis/settings.k8s.io/v1alpha1=true]
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
+ value: DefaultAdmissionConfig
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
+ value: v1
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
+ value: false
+ register: yedit_output
+
+#restart master serially here
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is not defined or not openshift.master.ha | bool
+
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when:
+ - yedit_output.changed
+
+- name: Delete temp directory
+ file:
+ name: "{{ certtemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
new file mode 100644
index 000000000..c09834fd4
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -0,0 +1,79 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: apiserver
+ name: apiserver
+spec:
+ selector:
+ matchLabels:
+ app: apiserver
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: apiserver
+ spec:
+ serviceAccountName: service-catalog-apiserver
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - args:
+ - --storage-type
+ - etcd
+ - --secure-port
+ - "6443"
+ - --etcd-servers
+ - {{ etcd_servers }}
+ - --etcd-cafile
+ - {{ etcd_cafile }}
+ - --etcd-certfile
+ - /etc/origin/master/master.etcd-client.crt
+ - --etcd-keyfile
+ - /etc/origin/master/master.etcd-client.key
+ - -v
+ - "10"
+ - --cors-allowed-origins
+ - {{ cors_allowed_origin }}
+ - --admission-control
+ - "KubernetesNamespaceLifecycle"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/apiserver"]
+ imagePullPolicy: Always
+ name: apiserver
+ ports:
+ - containerPort: 6443
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: apiserver-ssl
+ readOnly: true
+ - mountPath: /etc/origin/master
+ name: etcd-host-cert
+ readOnly: true
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: apiserver-ssl
+ secret:
+ defaultMode: 420
+ secretName: apiserver-ssl
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ - key: tls.key
+ path: apiserver.key
+ - hostPath:
+ path: /etc/origin/master
+ name: etcd-host-cert
+ - emptyDir: {}
+ name: data-dir
diff --git a/roles/openshift_service_catalog/templates/api_server_route.j2 b/roles/openshift_service_catalog/templates/api_server_route.j2
new file mode 100644
index 000000000..3c3da254d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_route.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Route
+metadata:
+ name: apiserver
+spec:
+ port:
+ targetPort: secure
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: apiserver
+ weight: 100
+ wildcardPolicy: None
diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2
new file mode 100644
index 000000000..bae337201
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: apiserver
+spec:
+ ports:
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: apiserver
+ sessionAffinity: None
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
new file mode 100644
index 000000000..1bbc0fa2c
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -0,0 +1,47 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: controller-manager
+ name: controller-manager
+spec:
+ selector:
+ matchLabels:
+ app: controller-manager
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: controller-manager
+ spec:
+ serviceAccountName: service-catalog-controller
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - env:
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ args:
+ - -v
+ - "5"
+ - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/controller-manager"]
+ imagePullPolicy: Always
+ name: controller-manager
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2
new file mode 100644
index 000000000..2bac645fc
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: controller-manager
+spec:
+ ports:
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: controller-manager
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_role_patching.j2
new file mode 100644
index 000000000..69b062b3f
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/sc_role_patching.j2
@@ -0,0 +1,26 @@
+{{ original_content }}
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - instances
+ - bindings
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+- apiGroups:
+ - "settings.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
diff --git a/roles/openshift_service_catalog/vars/default_images.yml b/roles/openshift_service_catalog/vars/default_images.yml
new file mode 100644
index 000000000..6fb9d1b86
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "docker.io/openshift/origin-"
+__openshift_service_catalog_image_version: "latest"
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..4df60e9a8
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/ose-"
+__openshift_service_catalog_image_version: "v3.6"
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
new file mode 100644
index 000000000..b367e7daf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -0,0 +1,156 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Configuration
+
+This role handles the configuration of GlusterFS clusters. It can handle
+the following configuration scenarios:
+
+* Configuring a new, natively-hosted GlusterFS cluster. In this scenario,
+ GlusterFS pods are deployed on nodes in the OpenShift cluster which are
+ configured to provide storage.
+* Configuring a new, external GlusterFS cluster. In this scenario, the
+ cluster nodes have the GlusterFS software pre-installed but have not
+ been configured yet. The installer will take care of configuring the
+ cluster(s) for use by OpenShift applications.
+* Using existing GlusterFS clusters. In this scenario, one or more
+ GlusterFS clusters are assumed to be already set up. These clusters can
+ be either natively-hosted or external, but must be managed by a
+ [heketi service](https://github.com/heketi/heketi).
+
+As part of the configuration, a particular GlusterFS cluster may be
+specified to provide backend storage for a natively-hosted Docker
+registry.
+
+Unless configured otherwise, a StorageClass will be automatically
+created for each non-registry GlusterFS cluster. This will allow
+applications which can mount PersistentVolumes to request
+dynamically-provisioned GlusterFS volumes.
+
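+For illustration only, once such a StorageClass exists an application can request
+a dynamically-provisioned volume with a PersistentVolumeClaim along these lines.
+The claim name is a placeholder, and the class name shown assumes the default
+cluster name of 'storage':
+
+```
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: gluster-claim
+spec:
+  storageClassName: glusterfs-storage
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+```
+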
+Requirements
+------------
+
+* Ansible 2.2
+
+Host Groups
+-----------
+
+The following group is expected to be populated for this role to run:
+
+* `[glusterfs]`
+
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
+Host Variables
+--------------
+
+For configuring new clusters, the following role variables are available.
+
+Each host in either of the above groups must have the following variable
+defined:
+
+| Name | Default value | Description |
+|-------------------|---------------|-----------------------------------------|
+| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, i.e. have no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]'
+
+In addition, each host may specify the following variables to further control
+their configuration as GlusterFS nodes:
+
+| Name | Default value | Description |
+|--------------------|---------------------------|-----------------------------------------|
+| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
+| glusterfs_hostname | openshift.node.nodename | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
+| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
+
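+As a sketch, an inventory fragment for a three-node, natively-hosted cluster
+might look like the following (hostnames and device paths are placeholders):
+
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[glusterfs]
+node1.example.com glusterfs_devices='[ "/dev/sdb" ]' glusterfs_zone=1
+node2.example.com glusterfs_devices='[ "/dev/sdc" ]' glusterfs_zone=2
+node3.example.com glusterfs_devices='[ "/dev/sdd" ]' glusterfs_zone=3
+```
+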
+Role Variables
+--------------
+
+This role has the following variables that control the integration of a
+GlusterFS cluster into a new or existing OpenShift cluster:
+
+| Name | Default value | Description |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile | '/dev/null' | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+
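+For example, a few of these variables could be set from the inventory as shown
+below (the values are illustrative only):
+
+```
+[OSEv3:vars]
+openshift_storage_glusterfs_namespace=glusterfs
+openshift_storage_glusterfs_storageclass=True
+openshift_storage_glusterfs_timeout=600
+```
+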
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are an exception:
+
+| Name | Default value | Description |
+|-------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+
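+For instance, a registry-backing cluster could be tuned separately from the main
+storage cluster via the prefixed variables (again, values are illustrative):
+
+```
+[OSEv3:vars]
+openshift_storage_glusterfs_registry_namespace=infra-storage
+openshift_storage_glusterfs_registry_storageclass=False
+```
+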
+Additionally, this role's behavior responds to the following registry-specific
+variables:
+
+| Name | Default value | Description |
+|-----------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..a846889ca
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,54 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'glusterfs'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_name: 'storage'
+openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_storageclass: True
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_cli: 'heketi-cli'
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ omit }}"
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+openshift_storage_glusterfs_heketi_port: 8080
+openshift_storage_glusterfs_heketi_executor: 'kubernetes'
+openshift_storage_glusterfs_heketi_ssh_port: 22
+openshift_storage_glusterfs_heketi_ssh_user: 'root'
+openshift_storage_glusterfs_heketi_ssh_sudo: False
+openshift_storage_glusterfs_heketi_ssh_keyfile: '/dev/null'
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_name: 'registry'
+openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_storageclass: False
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ omit }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ omit }}"
+openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
+openshift_storage_glusterfs_registry_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
+openshift_storage_glusterfs_registry_heketi_executor: "{{ openshift_storage_glusterfs_heketi_executor }}"
+openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glusterfs_heketi_ssh_port }}"
+openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
+openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
+openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..9ebb0d5ec
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -0,0 +1,143 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
new file mode 100644
index 000000000..a86c96df7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -0,0 +1,23 @@
+'''
+ OpenShift Storage GlusterFS filter plugin that provides filters used by the GlusterFS roles
+'''
+
+
+def map_from_pairs(source, delim="="):
+ ''' Return a dict parsed from a comma-separated string of delim-joined key/value pairs '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Storage GlusterFS Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs
+ }
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jose A. Rivera
+ description: OpenShift GlusterFS Cluster
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
new file mode 100644
index 000000000..600d8f676
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -0,0 +1,253 @@
+---
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+ when:
+ - not openshift.common.is_atomic | bool
+ - not glusterfs_heketi_is_native | bool
+
+- name: Verify heketi-cli is installed
+ shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"
+ changed_when: False
+ when:
+ - not glusterfs_heketi_is_native | bool
+
+- name: Verify target namespace exists
+ oc_project:
+ state: present
+ name: "{{ glusterfs_namespace }}"
+ when: glusterfs_is_native or glusterfs_heketi_is_native
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,dc,jobs,secret"
+ selector: "deploy-heketi"
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-config-secret"
+ - kind: "template,route,service,dc"
+ name: "heketi-{{ glusterfs_name }}"
+ - kind: "svc"
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ - kind: "sa"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ failed_when: False
+ when: glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when: glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when: glusterfs_heketi_wipe
+
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
+- name: Create heketi service account
+ oc_serviceaccount:
+ namespace: "{{ glusterfs_namespace }}"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+ oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+ oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
+ resource_kind: role
+ resource_name: edit
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+ register: heketi_pod
+ when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+ set_fact:
+ glusterfs_heketi_deploy_is_missing: False
+ when:
+ - "glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
+ register: heketi_pod
+ when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+ set_fact:
+ glusterfs_heketi_is_missing: False
+ when:
+ - "glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
+- name: Generate heketi config file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi.json.j2"
+ dest: "{{ mktemp.stdout }}/heketi.json"
+ when:
+ - glusterfs_heketi_is_native
+
+- name: Generate heketi admin key
+ set_fact:
+ glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_admin_key is undefined
+
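+# The until loop below regenerates the key until it differs from the admin key.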
+- name: Generate heketi user key
+ set_fact:
+ glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+ until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"
+ delay: 1
+ retries: 10
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_user_key is undefined
+
+- name: Create heketi config secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-config-secret"
+ force: True
+ files:
+ - name: heketi.json
+ path: "{{ mktemp.stdout }}/heketi.json"
+ - name: private_key
+ path: "{{ glusterfs_heketi_ssh_keyfile }}"
+ when:
+ - glusterfs_heketi_is_native
+
+- include: heketi_deploy_part1.yml
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
+
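+# Assemble the heketi-cli invocation used by later tasks. For native deployments this becomes, e.g.,
+#   <client binary> rsh --namespace=<namespace> <deploy-heketi pod> heketi-cli -s http://localhost:8080 --user admin --secret '<admin key>'
+# For external heketi it targets http://<heketi url>:<port> directly.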
+- name: Set heketi-cli command
+ set_fact:
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+
+- name: Verify heketi service
+ command: "{{ glusterfs_heketi_client }} cluster list"
+ changed_when: False
+
+- name: Load heketi topology
+ command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ register: topology_load
+ failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ when:
+ - glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_is_missing
+
+- name: Create heketi secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ type: "kubernetes.io/glusterfs"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_admin_key }}"
+ when:
+ - glusterfs_storageclass
+ - glusterfs_heketi_admin_key is defined
+
+- name: Get heketi route
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: route
+ state: list
+ name: "heketi-{{ glusterfs_name }}"
+ register: heketi_route
+ when:
+ - glusterfs_storageclass
+ - glusterfs_heketi_is_native
+
+- name: Determine StorageClass heketi URL
+ set_fact:
+ glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+ when:
+ - glusterfs_storageclass
+ - glusterfs_heketi_is_native
+
+- name: Generate GlusterFS StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
+
+- name: Create GlusterFS StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
new file mode 100644
index 000000000..7a2987883
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -0,0 +1,31 @@
+---
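+# Map the role-level openshift_storage_glusterfs_* variables onto generic glusterfs_* facts
+# so the shared glusterfs_common.yml tasks can be reused for both this and the registry deployment.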
+- set_fact:
+ glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+ glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
+ glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
+ glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+ glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe | bool }}"
+ glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native | bool }}"
+ glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing | bool }}"
+ glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing | bool }}"
+ glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}"
+ glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+ glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+ glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+ glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load | bool }}"
+ glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe | bool }}"
+ glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+ glusterfs_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
+ glusterfs_heketi_executor: "{{ openshift_storage_glusterfs_heketi_executor }}"
+ glusterfs_heketi_ssh_port: "{{ openshift_storage_glusterfs_heketi_ssh_port }}"
+ glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
+ glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo | bool }}"
+ glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
+ glusterfs_nodes: "{{ groups.glusterfs }}"
+
+- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..3db5cc389
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,113 @@
+---
+- assert:
+ that: "glusterfs_nodes | count >= 3"
+ msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
+ state: absent
+ with_items:
+ - kind: template
+ name: glusterfs
+ - kind: daemonset
+ name: "glusterfs-{{ glusterfs_name }}"
+ when: glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+ oc_label:
+ name: "{{ hostvars[item].openshift.node.nodename }}"
+ kind: node
+ state: absent
+ labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.all }}"
+ when: glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+ file:
+ path: /var/lib/glusterd
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ when: glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+ command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+ register: devices_info
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ failed_when: False
+ when: glusterfs_wipe
+
+ # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ delegate_to: "{{ item.item }}"
+ with_items: "{{ devices_info.results }}"
+ register: clear_devices
+ until:
+ - "'contains a filesystem in use' not in clear_devices.stderr"
+ delay: 1
+ retries: 30
+ when:
+ - glusterfs_wipe
+ - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+
+- name: Label GlusterFS nodes
+ oc_label:
+ name: "{{ hostvars[item].openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+
+- name: Copy GlusterFS DaemonSet template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: "glusterfs"
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "glusterfs"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_image }}"
+ IMAGE_VERSION: "{{ glusterfs_version }}"
+ NODE_LABELS: "{{ glusterfs_nodeselector }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for GlusterFS pods
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs={{ glusterfs_name }}-pod"
+ register: glusterfs_pods
+ until:
+ - "glusterfs_pods.results.results[0]['items'] | count > 0"
+ # There must be as many pods with 'Ready' status True as there are nodes expected to run them
+ - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..e46cec378
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,81 @@
+---
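+# Same mapping as in glusterfs_config.yml, but sourced from the registry-specific variables.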
+- set_fact:
+ glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+ glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
+ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
+ glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
+ glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+ glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe | bool }}"
+ glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native | bool }}"
+ glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing | bool }}"
+ glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing | bool }}"
+ glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_registry_heketi_cli }}"
+ glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
+ glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
+ glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
+ glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}"
+ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load | bool }}"
+ glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe | bool }}"
+ glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
+ glusterfs_heketi_port: "{{ openshift_storage_glusterfs_registry_heketi_port }}"
+ glusterfs_heketi_executor: "{{ openshift_storage_glusterfs_registry_heketi_executor }}"
+ glusterfs_heketi_ssh_port: "{{ openshift_storage_glusterfs_registry_heketi_ssh_port }}"
+ glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_registry_heketi_ssh_user }}"
+ glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}"
+ glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
+ glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
+
+- include: glusterfs_common.yml
+ when:
+ - glusterfs_nodes | default([]) | count > 0
+ - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
+
+- name: Delete pre-existing GlusterFS registry resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
+ state: absent
+ with_items:
+ - kind: "svc"
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
+ failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Generate GlusterFS registry service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: service
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+ command: "{{ glusterfs_heketi_client }} volume list"
+ register: registry_volume
+
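+# heketi expects a bare size in GB, so the "Gi" suffix is stripped from the configured volume size.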
+- name: Create GlusterFS registry volume
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..3ba1eb2d2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,56 @@
+---
+- name: Copy initial heketi resource files
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "deploy-heketi-template.yml"
+
+- name: Create heketi topology secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ force: True
+ files:
+ - name: topology.json
+ path: "{{ mktemp.stdout }}/topology.json"
+
+- name: Create deploy-heketi template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: "deploy-heketi"
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "deploy-heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
+ HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+ TOPOLOGY_PATH: "{{ mktemp.stdout }}"
+
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..37d3e6ba2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,133 @@
+---
+- name: Create heketi DB volume
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
+ register: setup_storage
+
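+# Pull the generated heketi-storage resource list out of the deploy-heketi pod into the local temp dir.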
+- name: Copy heketi-storage list
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+
+# The copied admin kubeconfig is used by the "Copy heketi DB to GlusterFS volume" task below
+- name: Copy the admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+ when: setup_storage.rc == 0
+
+- name: Wait for copy job to finish
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: job
+ state: list
+ name: "heketi-storage-copy-job"
+ register: heketi_job
+ until:
+ - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+ # The job's 'Complete' status must be True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ failed_when:
+ - "'results' in heketi_job.results"
+ - "heketi_job.results.results | count > 0"
+ # Fail when the job's 'Failed' status is True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ when: setup_storage.rc == 0
+
+- name: Delete deploy resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+
+- name: Generate heketi endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Generate heketi service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-service.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Create heketi endpoints
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: endpoints
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Create heketi service
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: service
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Copy heketi template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+ dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
+ HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+
+- name: Set heketi-cli command
+ set_fact:
+ glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+
+- name: Verify heketi service
+ command: "{{ glusterfs_heketi_client }} cluster list"
+ changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..d2d8c6c10
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- include: glusterfs_config.yml
+ when:
+ - groups.glusterfs | default([]) | count > 0
+
+- include: glusterfs_registry.yml
+ when:
+ - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 0d6b8b7d4..019ada2fb 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -30,7 +30,7 @@
- "{{ openshift.hosted.metrics }}"
- "{{ openshift.hosted.logging }}"
- "{{ openshift.hosted.loggingops }}"
-
+ - "{{ openshift.hosted.etcd }}"
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 8c6d4105c..7e8f70b23 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -2,3 +2,4 @@
{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }}
{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }}
+{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 37c80c29e..ca896addd 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -16,3 +16,4 @@ dependencies:
- role: openshift_docker_facts
- role: docker
when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+- role: lib_utils
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 35953b744..f4cb8ddb2 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -3,22 +3,71 @@
- set_fact:
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
+ is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
-- fail:
- msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
- when: is_containerized | bool and openshift.common.deployment_type == 'origin' and openshift_release is not defined and openshift_image_tag is not defined
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+ fail:
+ msg: |-
+ To install a containerized Origin release, you must set openshift_release or
+ openshift_image_tag in your inventory to specify which version of the OpenShift
+ component images to use. You may want the latest (usually alpha) release or
+ a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
+ when:
+ - is_containerized | bool
+ - openshift.common.deployment_type == 'origin'
+ - openshift_release is not defined
+ - openshift_image_tag is not defined
# Normalize some values that we need in a certain format that might be confusing:
- set_fact:
- openshift_image_tag: "{{ 'v' + openshift_image_tag }}"
- when: openshift_image_tag is defined and openshift_image_tag[0] != 'v' and openshift_image_tag != 'latest'
+ openshift_release: "{{ openshift_release[1:] }}"
+ when:
+ - openshift_release is defined
+ - openshift_release[0] == 'v'
- set_fact:
- openshift_pkg_version: "{{ '-' + openshift_pkg_version }}"
- when: openshift_pkg_version is defined and openshift_pkg_version[0] != '-'
+ openshift_release: "{{ openshift_release | string }}"
+ when:
+ - openshift_release is defined
+
+# Verify that the image tag is in a valid format
+- when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
+ block:
+
+ # Verifies that when the deployment type is origin the version:
+ # - starts with a v
+ # - has three integers separated by dots
+ # It also allows for optional trailing data which:
+ # - must start with a dash
+ # - may contain numbers, letters, dashes and dots.
+ - name: (Origin) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'origin'
+ assert:
+ that:
+ - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
+ msg: |-
+ openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+ You specified openshift_image_tag={{ openshift_image_tag }}
+
+ # Verifies that when the deployment type is openshift-enterprise the version:
+ # - starts with a v
+ # - has at least two integers separated by dots
+ # It also allows for optional trailing data which:
+ # - must start with a dash
+ # - may contain numbers
+ - name: (Enterprise) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'openshift-enterprise'
+ assert:
+ that:
+ - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
+ msg: |-
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -30,54 +79,124 @@
- name: Use openshift.common.version fact as version to configure if already installed
set_fact:
openshift_version: "{{ openshift.common.version }}"
- when: openshift.common.version is defined and openshift_version is not defined and openshift_protect_installed_version | bool
-
-- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
- when: not is_containerized | bool
-
-- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
- when: is_containerized | bool
-
-# At this point we know openshift_version is set appropriately. Now we set
-# openshift_image_tag and openshift_pkg_version, so all roles can always assume
-# each of this variables *will* be set correctly and can use them per their
-# intended purpose.
-
-- set_fact:
- openshift_image_tag: v{{ openshift_version }}
- when: openshift_image_tag is not defined
-
-- set_fact:
- openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
-
-- fail:
- msg: openshift_version role was unable to set openshift_version
- when: openshift_version is not defined
-
-- fail:
- msg: openshift_version role was unable to set openshift_image_tag
- when: openshift_image_tag is not defined
-
-- fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- when: openshift_pkg_version is not defined
-
-- fail:
- msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
- when: not is_containerized | bool and openshift_version == '0.0'
-
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
-# the rpm version we looked up matches the release requested and error out if not.
-- fail:
- msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
- when: not is_containerized | bool and openshift_release is defined and not openshift_version.startswith(openshift_release) | bool
-
-# The end result of these three variables is quite important so make sure they are displayed and logged:
-- debug: var=openshift_release
-
-- debug: var=openshift_image_tag
-
-- debug: var=openshift_pkg_version
+ when:
+ - openshift.common.version is defined
+ - openshift_version is not defined
+ - openshift_protect_installed_version | bool
+
+# The rest of these tasks should only execute on
+# masters and nodes as we can verify they have subscriptions
+- when:
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ block:
+ - name: Set openshift_version for rpm installation
+ include: set_version_rpm.yml
+ when: not is_containerized | bool
+
+ - name: Set openshift_version for containerized installation
+ include: set_version_containerized.yml
+ when: is_containerized | bool
+
+ - block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ # Both versions are compared as plain strings
+ when:
+ - openshift_rpm_version != openshift_version
+ # If openshift_pkg_version or openshift_image_tag is defined, the user is explicitly allowing the rpm and image versions to differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
+ when:
+ - is_containerized | bool
+ - not is_atomic | bool
+
+ # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
+ # NOTE: This will need to be modified/removed for future container + rpm installations work.
+ - name: Warn if openshift_image_tag is defined when not doing a containerized install
+ debug:
+ msg: >
+ openshift_image_tag is used for containerized installs. If you are trying to
+ specify an image for a non-container install, see oreg_url, oreg_url_master, or oreg_url_node.
+ when:
+ - not is_containerized | bool
+ - openshift_image_tag is defined
+
+ # At this point we know openshift_version is set appropriately. Now we set
+ # openshift_image_tag and openshift_pkg_version, so all roles can always assume
+ # each of this variables *will* be set correctly and can use them per their
+ # intended purpose.
+
+ - block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
+
+ when: openshift_image_tag is not defined
+
+ - block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+
+ when: openshift_pkg_version is not defined
+
+ - fail:
+ msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
+ when: openshift_version is not defined
+
+ - fail:
+ msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
+ when: openshift_image_tag is not defined
+
+ - fail:
+ msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
+ when: openshift_pkg_version is not defined
+
+ - fail:
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if no OpenShift version is available
+ when:
+ - not is_containerized | bool
+ - openshift_version == '0.0'
+
+ # We can't map an openshift_release to full rpm version like we can with containers; make sure
+ # the rpm version we looked up matches the release requested and error out if not.
+ - name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
+
+ # The end result of these three variables is quite important so make sure they are displayed and logged:
+ - debug: var=openshift_release
+
+ - debug: var=openshift_image_tag
+
+ - debug: var=openshift_pkg_version
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index cd0f20ae9..0ec4c49d6 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -4,12 +4,16 @@
# Expects a leading "v" in inventory, strip it off here unless
# openshift_image_tag=latest
openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
- when: openshift_image_tag is defined and openshift_version is not defined
+ when:
+ - openshift_image_tag is defined
+ - openshift_version is not defined
- name: Set containerized version to configure if openshift_release specified
set_fact:
openshift_version: "{{ openshift_release }}"
- when: openshift_release is defined and openshift_version is not defined
+ when:
+ - openshift_release is defined
+ - openshift_version is not defined
- name: Lookup latest containerized version if no version specified
command: >
@@ -20,7 +24,10 @@
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
openshift_version: "{{ (cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-'))[1:] }}"
- when: openshift_version is not defined and openshift.common.deployment_type == 'origin' and cli_image_version.stdout_lines[0].split('-') | length > 1
+ when:
+ - openshift_version is not defined
+ - openshift.common.deployment_type == 'origin'
+ - cli_image_version.stdout_lines[0].split('-') | length > 1
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -32,11 +39,15 @@
command: >
docker run --rm {{ openshift.common.cli_image }}:v{{ openshift_version }} version
register: cli_image_version
- when: openshift_version is defined and openshift_version.split('.') | length == 2
+ when:
+ - openshift_version is defined
+ - openshift_version.split('.') | length == 2
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is defined and openshift_version.split('.') | length == 2
+ when:
+ - openshift_version is defined
+ - openshift_version.split('.') | length == 2
# We finally have the specific version. Now we clean up any strange
# dangly +c0mm1t-offset tags in the version. See also,
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index 0c2ef4bb7..c40777bf1 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -3,44 +3,22 @@
set_fact:
# Expects a leading "-" in inventory, strip it off here, and remove trailing release,
openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
- when: openshift_pkg_version is defined and openshift_version is not defined
+ when:
+ - openshift_pkg_version is defined
+ - openshift_version is not defined
-# if {{ openshift.common.service_type}}-excluder is enabled,
-# the repoquery for {{ openshift.common.service_type}} will not work.
-# Thus, create a temporary yum,conf file where exclude= is set to an empty list
-- name: Create temporary yum.conf file
- command: mktemp -d /tmp/yum.conf.XXXXXX
- register: yum_conf_temp_file_result
+- block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
-- set_fact:
- yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf"
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
-- name: Copy yum.conf into the temporary file
- copy:
- src: /etc/yum.conf
- dest: "{{ yum_conf_temp_file }}"
- remote_src: True
-
-- name: Clear the exclude= list in the temporary yum.conf
- lineinfile:
- # since ansible 2.3 s/dest/path
- dest: "{{ yum_conf_temp_file }}"
- regexp: '^exclude='
- line: 'exclude='
-
-- name: Gather common package version
- command: >
- {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}"
- register: common_version
- failed_when: false
- changed_when: false
- when: openshift_version is not defined
-
-- name: Delete the temporary yum.conf
- file:
- path: "{{ yum_conf_temp_file_result.stdout }}"
- state: absent
-
-- set_fact:
- openshift_version: "{{ common_version.stdout | default('0.0', True) }}"
- when: openshift_version is not defined
+ - set_fact:
+ openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ when:
+ - openshift_version is not defined
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index 43db3cc74..e7ef544f4 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -17,7 +17,7 @@ Role Variables
| Name | Default | |
|---------------------------|---------|----------------------------------------|
-| os_firewall_use_firewalld | True | If false, use iptables |
+| os_firewall_use_firewalld | False | If false, use iptables |
| os_firewall_allow | [] | List of service,port mappings to allow |
| os_firewall_deny | [] | List of service, port mappings to deny |
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index 4c544122f..01859e5fc 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -2,6 +2,6 @@
os_firewall_enabled: True
# firewalld is not supported on Atomic Host
# https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-os_firewall_use_firewalld: "{{ False if openshift.common.is_atomic | bool else True }}"
+os_firewall_use_firewalld: "{{ False }}"
os_firewall_allow: []
os_firewall_deny: []
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 8d4878fa7..aeee3ede8 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 2b40eee1b..509655b0c 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -14,7 +14,7 @@
- iptables
- ip6tables
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling iptables
pause:
@@ -34,6 +34,12 @@
pause: seconds=10
when: result | changed
+- name: Restart polkitd
+ systemd:
+ name: polkit
+ state: restarted
+ when: result | changed
+
# Fix suspected race between firewalld and polkit BZ1436964
- name: Wait for polkit action to have been created
command: pkaction --action-id=org.fedoraproject.FirewallD1.config.info
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 38ea2477c..55f2fc471 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -7,7 +7,7 @@
enabled: no
masked: yes
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling firewalld
pause:
diff --git a/roles/rhel_subscribe/meta/main.yml b/roles/rhel_subscribe/meta/main.yml
index 0bbeadd34..23d65c7ef 100644
--- a/roles/rhel_subscribe/meta/main.yml
+++ b/roles/rhel_subscribe/meta/main.yml
@@ -1,3 +1,2 @@
---
-dependencies:
- - role: openshift_facts
+dependencies: []
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 41673ee40..ea0c42150 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -7,7 +7,7 @@
when: deployment_type == 'enterprise'
- set_fact:
- default_ose_version: '3.4'
+ default_ose_version: '3.5'
when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
- set_fact:
@@ -16,10 +16,13 @@
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] )
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5'] )
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
+ --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
+ --enable="rhel-7-fast-datapath-rpms"
+ register: subscribe_repos
+ until: subscribe_repos | succeeded
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 28c3c7080..453044a6e 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -21,6 +21,11 @@
msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
when: rhel_subscription_pass is not defined
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
- name: Satellite preparation
command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
args:
@@ -57,5 +62,6 @@
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
- when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] and
- not openshift.common.is_atomic | bool
+ when:
+ - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
+ - not ostree_booted.stat.exists | bool
diff --git a/setup.py b/setup.py
index 2ad26110b..c6a132ae2 100644
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@ import os
import fnmatch
import re
import sys
+import subprocess
import yaml
# Always prefer setuptools over distutils
@@ -199,6 +200,52 @@ class OpenShiftAnsibleGenerateValidation(Command):
print('\nAll generate scripts passed.\n')
+class OpenShiftAnsibleSyntaxCheck(Command):
+ ''' Command to run Ansible syntax check'''
+ description = "Run Ansible syntax check"
+ user_options = []
+
+ # Colors
+ FAIL = '\033[91m' # Red
+ ENDC = '\033[0m' # Reset
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ pass
+
+ def run(self):
+ ''' run command '''
+
+ has_errors = False
+
+ for yaml_file in find_files(
+ os.path.join(os.getcwd(), 'playbooks', 'byo'),
+ None, None, r'\.ya?ml$'):
+ with open(yaml_file, 'r') as contents:
+ for line in contents:
+ # initialize_groups.yml is used to identify entry point playbooks
+ if re.search(r'initialize_groups\.yml', line):
+ print('-' * 60)
+ print('Syntax checking playbook: %s' % yaml_file)
+ try:
+ subprocess.check_output(
+ ['ansible-playbook', '-i localhost,',
+ '--syntax-check', yaml_file]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
+ # Break for loop, no need to continue looping lines
+ break
+ if has_errors:
+ raise SystemExit(1)
+
+
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
@@ -242,6 +289,7 @@ setup(
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
'generate_validation': OpenShiftAnsibleGenerateValidation,
+ 'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
},
packages=[],
)
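
For reference, the command registered above is invoked as `python setup.py ansible_syntax` (the tox.ini change later in this patch wires it up); it walks `playbooks/byo` for entry-point playbooks that reference `initialize_groups.yml` and runs `ansible-playbook --syntax-check` against each one:

```
python setup.py ansible_syntax
```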
diff --git a/test-requirements.txt b/test-requirements.txt
index 805828e1c..585cca0b9 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,14 @@
+# Versions are pinned to prevent pypi releases arbitrarily breaking
+# tests with new APIs/semantics. We want to update versions deliberately.
+
# flake8 must be listed before pylint to avoid dependency conflicts
-flake8
-flake8-mutable
-flake8-print
-pylint
-setuptools-lint
-yamllint
-coverage
-mock
-pytest
-pytest-cov
+flake8==3.3.0
+flake8-mutable==1.1.0
+flake8-print==2.0.2
+pylint==1.6.5
+setuptools-lint==0.5.2
+yamllint==1.6.1
+coverage==4.3.4
+mock==2.0.0
+pytest==3.0.7
+pytest-cov==2.4.0
diff --git a/test/integration/README.md b/test/integration/README.md
new file mode 100644
index 000000000..948e44c50
--- /dev/null
+++ b/test/integration/README.md
@@ -0,0 +1,39 @@
+# Integration tests
+
+Integration tests exercise the OpenShift Ansible playbooks by running them
+against an inventory with Docker containers as hosts.
+
+## Requirements
+
+The tests assume that:
+
+* docker is running on localhost and the current user has permission to use it.
+* golang is installed and the go binary is in PATH.
+* python and tox are installed.
+
+## Building images
+
+The tests rely on images built into the local docker image store. You can build them
+from the repository root with:
+
+```
+./test/integration/build-images.sh
+```
+
+Use the `--help` option to view available options.
+
+## Running the tests
+
+From the repository root, run the integration tests with:
+
+```
+./test/integration/run-tests.sh
+```
+
+Use the `--help` option to view available options.
+
+You can also run tests more directly, for example to run a specific check:
+
+```
+go test ./test/integration/... -run TestPackageUpdateDepMissing
+```
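
The images are looked up by name prefix at playbook run time: `run-tests.sh` passes its `--prefix` value to the tests through the `IMAGE_PREFIX` environment variable, and `setup_container.yml` falls back to the default `openshift-ansible-integration-` prefix when it is unset. So a single test can presumably also be run directly against images built with a non-default prefix, for example (the registry path here is illustrative):

```
IMAGE_PREFIX=docker.io/openshift/ansible-integration- go test ./test/integration/... -run TestPackageUpdateDepMissing
```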
diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh
new file mode 100755
index 000000000..74a55fa51
--- /dev/null
+++ b/test/integration/build-images.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+# This is intended to run either locally (in which case a push is not
+# necessary) or in a CI job (where the results should be pushed to a
+# registry for use in later CI test jobs). Images are tagged locally with
+# both the base name (e.g. "test-target-base") and with the prefix given;
+# then only the prefixed name is pushed if --push is specified, assuming
+# any necessary credentials are available for the push. The same prefix
+# can then be used for the testing script. By default a local (non-registry)
+# prefix is used and no push can occur. To push to e.g. dockerhub:
+#
+# ./build-images.sh --push --prefix=docker.io/openshift/ansible-integration-
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+STARTTIME=$(date +%s)
+source_root=$(dirname "${0}")
+
+prefix="${PREFIX:-openshift-ansible-integration-}"
+push=false
+verbose=false
+build_options="${DOCKER_BUILD_OPTIONS:-}"
+help=false
+
+for args in "$@"
+do
+ case $args in
+ --prefix=*)
+ prefix="${args#*=}"
+ ;;
+ --push)
+ push=true
+ ;;
+ --no-cache)
+ build_options="${build_options} --no-cache"
+ ;;
+ --verbose)
+ verbose=true
+ ;;
+ --help)
+ help=true
+ ;;
+ esac
+done
+
+if [ "$help" = true ]; then
+ echo "Builds the docker images for openshift-ansible integration tests"
+ echo "and pushes them to a central registry."
+ echo
+ echo "Options: "
+ echo " --prefix=PREFIX"
+ echo " The prefix to use for the image names."
+ echo " default: openshift-ansible-integration-"
+ echo
+ echo " --push"
+ echo " If set will push the tagged image"
+ echo
+ echo " --no-cache"
+ echo " If set will perform the build without a cache."
+ echo
+ echo " --verbose"
+ echo " Enables printing of the commands as they run."
+ echo
+ echo " --help"
+ echo " Prints this help message"
+ echo
+ exit 0
+fi
+
+if [ "$verbose" = true ]; then
+ set -x
+fi
+
+
+declare -a build_order ; declare -A images
+build_order+=( test-target-base ) ; images[test-target-base]=openshift_health_checker/builds/test-target-base
+build_order+=( preflight-aos-package-checks ); images[preflight-aos-package-checks]=openshift_health_checker/builds/aos-package-checks
+for image in "${build_order[@]}"; do
+ BUILD_STARTTIME=$(date +%s)
+ docker_tag=${prefix}${image}
+ echo
+ echo "--- Building component '$image' with docker tag '$docker_tag' ---"
+ docker build ${build_options} -t $image -t $docker_tag "$source_root/${images[$image]}"
+ echo
+ BUILD_ENDTIME=$(date +%s); echo "--- build $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
+ if [ "$push" = true ]; then
+ docker push $docker_tag
+ PUSH_ENDTIME=$(date +%s); echo "--- push $docker_tag took $(($PUSH_ENDTIME - $BUILD_ENDTIME)) seconds ---"
+ fi
+done
+
+echo
+echo
+echo "++ Active images"
+docker images | grep ${prefix} | sort
+echo
+
+
+ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
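
Putting the two scripts together, a typical workflow might look like the following sketch (the registry prefix is only an example; by default everything stays local and nothing is pushed):

```
# local-only build with the default prefix, then run the tests
./test/integration/build-images.sh
./test/integration/run-tests.sh

# or build, push, and test against a shared registry prefix
./test/integration/build-images.sh --push --prefix=docker.io/openshift/ansible-integration-
./test/integration/run-tests.sh --prefix=docker.io/openshift/ansible-integration-
```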
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
new file mode 100644
index 000000000..0d8162c2e
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
@@ -0,0 +1,32 @@
+FROM test-target-base
+
+RUN yum install -y rpm-build rpmdevtools createrepo && \
+ rpmdev-setuptree && \
+ mkdir -p /mnt/localrepo
+ADD root /
+
+# we will build some RPMs that can be used to break yum update in tests.
+RUN cd /root/rpmbuild/SOURCES && \
+ mkdir break-yum-update-1.0 && \
+ tar zfc foo.tgz break-yum-update-1.0 && \
+ rpmbuild -bb /root/break-yum-update.spec && \
+ yum install -y /root/rpmbuild/RPMS/noarch/break-yum-update-1.0-1.noarch.rpm && \
+ rpmbuild -bb /root/break-yum-update-2.spec && \
+ mkdir /mnt/localrepo/break-yum && \
+ cp /root/rpmbuild/RPMS/noarch/break-yum-update-1.0-2.noarch.rpm /mnt/localrepo/break-yum && \
+ createrepo /mnt/localrepo/break-yum
+
+# we'll also build some RPMs that can be used to exercise OCP package version tests.
+RUN cd /root/rpmbuild/SOURCES && \
+ mkdir atomic-openshift-3.2 && \
+ mkdir atomic-openshift-3.3 && \
+ tar zfc ose.tgz atomic-openshift-3.{2,3} && \
+ rpmbuild -bb /root/ose-3.2.spec && \
+ rpmbuild -bb /root/ose-3.3.spec && \
+ mkdir /mnt/localrepo/ose-3.{2,3} && \
+ cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.2-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
+ cp /root/rpmbuild/RPMS/noarch/{openvswitch-2.4,docker-1.10}-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
+ createrepo /mnt/localrepo/ose-3.2 && \
+ cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.3-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
+ cp /root/rpmbuild/RPMS/noarch/{openvswitch-2.4,docker-1.10}-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
+ createrepo /mnt/localrepo/ose-3.3
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo
new file mode 100644
index 000000000..f5ccd2d19
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo
@@ -0,0 +1,5 @@
+[break-yum]
+name=break-yum
+baseurl=file:///mnt/localrepo/break-yum
+enabled=0
+gpgcheck=0
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo
new file mode 100644
index 000000000..3064d6dbb
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo
@@ -0,0 +1,5 @@
+[ose-3.2]
+name=ose-3.2
+baseurl=file:///mnt/localrepo/ose-3.2
+enabled=0
+gpgcheck=0
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo
new file mode 100644
index 000000000..1466da476
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo
@@ -0,0 +1,5 @@
+[ose-3.3]
+name=ose-3.3
+baseurl=file:///mnt/localrepo/ose-3.3
+enabled=0
+gpgcheck=0
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec
new file mode 100644
index 000000000..ebd7eb443
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec
@@ -0,0 +1,33 @@
+Name: break-yum-update
+Version: 1.0
+Release: 2
+Summary: Package for breaking updates by requiring things that don't exist
+
+License: NA
+
+Requires: package-that-does-not-exist
+Source0: http://example.com/foo.tgz
+BuildArch: noarch
+
+%description
+Package for breaking updates by requiring things that don't exist
+
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%doc
+
+
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec
new file mode 100644
index 000000000..c40675f90
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec
@@ -0,0 +1,32 @@
+Name: break-yum-update
+Version: 1.0
+Release: 1
+Summary: Package for breaking updates by requiring things that don't exist
+
+License: NA
+
+Source0: http://example.com/foo.tgz
+BuildArch: noarch
+
+%description
+Package for breaking updates by requiring things that don't exist
+
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%doc
+
+
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
new file mode 100644
index 000000000..3b3eab696
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
@@ -0,0 +1,57 @@
+Name: atomic-openshift
+Version: 3.2
+Release: 1
+Summary: package the critical aos packages
+
+License: NA
+
+Source0: http://example.com/ose.tgz
+BuildArch: noarch
+
+%package master
+Summary: package the critical aos packages
+%package node
+Summary: package the critical aos packages
+%package -n openvswitch
+Summary: package the critical aos packages
+Version: 2.4
+%package -n docker
+Summary: package the critical aos packages
+Version: 1.10
+
+%description
+Package for pretending to provide AOS
+
+%description master
+Package for pretending to provide AOS
+
+%description node
+Package for pretending to provide AOS
+
+%description -n openvswitch
+Package for pretending to provide openvswitch
+
+%description -n docker
+Package for pretending to provide docker
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%files master
+%files node
+%files -n openvswitch
+%files -n docker
+
+%doc
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
new file mode 100644
index 000000000..66be0a862
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
@@ -0,0 +1,57 @@
+Name: atomic-openshift
+Version: 3.3
+Release: 1
+Summary: package the critical aos packages
+
+License: NA
+
+Source0: http://example.com/ose.tgz
+BuildArch: noarch
+
+%package master
+Summary: package the critical aos packages
+%package node
+Summary: package the critical aos packages
+%package -n openvswitch
+Summary: package the critical aos packages
+Version: 2.4
+%package -n docker
+Summary: package the critical aos packages
+Version: 1.10
+
+%description
+Package for pretending to provide AOS
+
+%description master
+Package for pretending to provide AOS
+
+%description node
+Package for pretending to provide AOS
+
+%description -n openvswitch
+Package for pretending to provide openvswitch
+
+%description -n docker
+Package for pretending to provide docker
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%files master
+%files node
+%files -n openvswitch
+%files -n docker
+
+%doc
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile b/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile
new file mode 100644
index 000000000..39b33c057
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile
@@ -0,0 +1,2 @@
+FROM centos/systemd
+RUN yum install -y iproute python-dbus PyYAML yum-utils
diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go
new file mode 100644
index 000000000..a92d6861d
--- /dev/null
+++ b/test/integration/openshift_health_checker/common.go
@@ -0,0 +1,99 @@
+package test
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "testing"
+)
+
+// A PlaybookTest executes a given Ansible playbook and checks the exit code and
+// output contents.
+type PlaybookTest struct {
+ // inputs
+ Path string
+ // expected outputs
+ ExitCode int
+ Output []string // zero or more strings that should be in the output
+}
+
+// Run runs the PlaybookTest.
+func (p PlaybookTest) Run(t *testing.T) {
+ // A PlaybookTest is intended to be run in parallel with other tests.
+ t.Parallel()
+
+ cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path)
+ cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1")
+ b, err := cmd.CombinedOutput()
+
+ // Check exit code.
+ if (err == nil) && (p.ExitCode != 0) {
+ p.checkExitCode(t, 0, p.ExitCode, cmd, b)
+ }
+ if (err != nil) && (p.ExitCode == 0) {
+ got, ok := getExitCode(err)
+ if !ok {
+ t.Logf("unexpected error (%T): %[1]v", err)
+ p.logCmdAndOutput(t, cmd, b)
+ t.FailNow()
+ }
+ p.checkExitCode(t, got, p.ExitCode, cmd, b)
+ }
+
+ // Check output contents.
+ var missing []string
+ for _, s := range p.Output {
+ if !bytes.Contains(b, []byte(s)) {
+ missing = append(missing, s)
+ }
+ }
+ if len(missing) > 0 {
+ t.Logf("missing in output: %q", missing)
+ p.logCmdAndOutput(t, cmd, b)
+ t.FailNow()
+ }
+}
+
+// getExitCode returns an exit code and true if the exit code could be taken
+// from err, false otherwise.
+// The implementation is GOOS-specific, and currently only supports Linux.
+func getExitCode(err error) (int, bool) {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ return -1, false
+ }
+ waitStatus, ok := exitErr.Sys().(syscall.WaitStatus)
+ if !ok {
+ return -1, false
+ }
+ return waitStatus.ExitStatus(), true
+}
+
+// checkExitCode marks the test as failed when got is different than want.
+func (p PlaybookTest) checkExitCode(t *testing.T, got, want int, cmd *exec.Cmd, output []byte) {
+ if got == want {
+ return
+ }
+ t.Logf("got exit code %v, want %v", got, want)
+ p.logCmdAndOutput(t, cmd, output)
+ t.FailNow()
+}
+
+// logCmdAndOutput logs how to re-run a command and a summary of the output of
+// its last execution for debugging.
+func (p PlaybookTest) logCmdAndOutput(t *testing.T, cmd *exec.Cmd, output []byte) {
+ const maxLines = 10
+ lines := bytes.Split(bytes.TrimRight(output, "\n"), []byte("\n"))
+ if len(lines) > maxLines {
+ lines = append([][]byte{[]byte("...")}, lines[len(lines)-maxLines:len(lines)]...)
+ }
+ output = bytes.Join(lines, []byte("\n"))
+ dir, err := filepath.Abs(cmd.Dir)
+ if err != nil {
+ panic(err)
+ }
+ t.Logf("\n$ (cd %s && %s)\n%s", dir, strings.Join(cmd.Args, " "), output)
+}
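
New scenarios are presumably added the same way the preflight tests further down in this patch are: a Go test file dot-imports this helper package and drives one playbook per test through `PlaybookTest`. The playbook path and expected output string below are hypothetical:

```
package preflight

import (
	"testing"

	. ".."
)

// Sketch of a test for a hypothetical new playbook.
func TestSomeNewCheck(t *testing.T) {
	PlaybookTest{
		Path:     "playbooks/some_new_check.yml", // hypothetical playbook
		ExitCode: 0,
		Output:   []string{"CHECK [some_new_check"},
	}.Run(t)
}
```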
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
new file mode 100644
index 000000000..a557282b4
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -0,0 +1,32 @@
+---
+# NOTE: this test is probably superfluous since openshift_version already does it
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: openshift-enterprise
+
+- name: Fail as required packages cannot be installed
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ # put the repo back to disabled
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2", repo_enabled: 0 }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_availability' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
new file mode 100644
index 000000000..16ff41673
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
@@ -0,0 +1,20 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: origin
+
+- name: Succeeds as Origin packages are public
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_availability' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
new file mode 100644
index 000000000..0929b73ce
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
@@ -0,0 +1,31 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Fails when a dependency required for update is missing
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "break-yum" }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
new file mode 100644
index 000000000..f8790358a
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
@@ -0,0 +1,38 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Fails when a repo definition is completely broken
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "break-yum" }
+
+ - name: Break the break-yum repo
+ replace:
+ dest: /etc/yum.repos.d/break-yum.repo
+ backup: no
+ regexp: "^baseurl"
+ replace: "#baseurl"
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
new file mode 100644
index 000000000..e2bb84715
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
@@ -0,0 +1,28 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Succeeds when nothing blocks a yum update
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
new file mode 100644
index 000000000..28efdd81d
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
@@ -0,0 +1,34 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Fails when repo content is not available
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "break-yum" }
+
+ - name: Remove the local repo entirely
+ file: path=/mnt/localrepo state=absent
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
new file mode 100644
index 000000000..58bed0fc0
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -0,0 +1,32 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Success when AOS version matches openshift_release
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ # disable extras so we control docker version
+ - include: tasks/enable_repo.yml
+ vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
new file mode 100644
index 000000000..c26413009
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -0,0 +1,35 @@
+---
+# NOTE: this test is probably superfluous since openshift_version already does it
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: openshift-enterprise
+ openshift_release: 3.3
+
+- name: Failure when AOS version doesn't match openshift_release
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.3" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ # put the repo back to disabled
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.3", repo_enabled: 0 }
+ # test with wrong repo enabled
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
new file mode 100644
index 000000000..850a55a72
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -0,0 +1,35 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+
+- name: Fails when multiple AOS versions are available
+ hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ roles:
+ - openshift_health_checker
+
+ post_tasks:
+ - block:
+
+ # enable repo with extra minor version available
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.3" }
+
+ # disable extras so we control docker version
+ - include: tasks/enable_repo.yml
+ vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
new file mode 100644
index 000000000..da3f6b844
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
@@ -0,0 +1,20 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: origin
+
+- name: Succeeds with Origin although multiple versions are available
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/test/integration/openshift_health_checker/preflight/playbooks/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
+++ b/test/integration/openshift_health_checker/preflight/playbooks/roles
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
new file mode 100644
index 000000000..6022f4289
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
@@ -0,0 +1,9 @@
+---
+- name: Enable {{ repo_name }} repo
+ # believe it or not we can't use the yum_repository module for this.
+ # https://github.com/ansible/ansible-modules-extras/issues/2384
+ ini_file:
+ dest: /etc/yum.repos.d/{{ repo_file | default(repo_name) }}.repo
+ section: "{{ repo_name }}"
+ option: enabled
+ value: "{{ repo_enabled | default(1) }}"
diff --git a/test/integration/openshift_health_checker/preflight/preflight_test.go b/test/integration/openshift_health_checker/preflight/preflight_test.go
new file mode 100644
index 000000000..9dfd713ec
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/preflight_test.go
@@ -0,0 +1,105 @@
+package preflight
+
+import (
+ "testing"
+
+ . ".."
+)
+
+func TestPackageUpdateDepMissing(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_dep_missing.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_update\":",
+ "Could not perform a yum update.",
+ "break-yum-update-1.0-2.noarch requires package-that-does-not-exist",
+ },
+ }.Run(t)
+}
+
+func TestPackageUpdateRepoBroken(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_repo_broken.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_update\":",
+ "Error with yum repository configuration: Cannot find a valid baseurl for repo",
+ },
+ }.Run(t)
+}
+
+func TestPackageUpdateRepoDisabled(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_repo_disabled.yml",
+ ExitCode: 0,
+ Output: []string{
+ "CHECK [package_update",
+ },
+ }.Run(t)
+}
+
+func TestPackageUpdateRepoUnreachable(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_repo_unreachable.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_update\":",
+ "Error getting data from at least one yum repository",
+ },
+ }.Run(t)
+}
+
+func TestPackageVersionMatches(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_version_matches.yml",
+ ExitCode: 0,
+ Output: []string{
+ "CHECK [package_version",
+ },
+ }.Run(t)
+}
+
+func TestPackageVersionMismatches(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_version_mismatches.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_version\":",
+ "Not all of the required packages are available at their requested version",
+ },
+ }.Run(t)
+}
+
+func TestPackageVersionMultiple(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_version_multiple.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_version\":",
+ "Multiple minor versions of these packages are available",
+ },
+ }.Run(t)
+}
+
+func TestPackageAvailabilityMissingRequired(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_availability_missing_required.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_availability\":",
+ "Cannot install all of the necessary packages.",
+ "atomic-openshift",
+ },
+ }.Run(t)
+}
+
+func TestPackageAvailabilitySucceeds(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_availability_succeeds.yml",
+ ExitCode: 0,
+ Output: []string{
+ "CHECK [package_availability",
+ },
+ }.Run(t)
+}
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
new file mode 100644
index 000000000..33e94cf1f
--- /dev/null
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -0,0 +1,48 @@
+---
+# Include this play once for each container you want to create and use as a test host.
+#
+# Optional parameters on the include are as follows:
+# * scenario = unique name for the container to be started
+# * image = name of the image to start in the container
+# * command = command to run in the container
+# * l_groups = host groups that the container should be added to
+# * l_host_vars = any variables that should be added to the host
+
+- name: Start container for specified test host
+ gather_facts: no
+ hosts: localhost
+ connection: local
+ tasks:
+
+ - set_fact:
+ # This is a little weird but if we use a var instead of a fact,
+ # a different random value is generated for each task. See:
+ # https://opensolitude.com/2015/05/27/ansible-lookups-variables-vs-facts.html
+ container_name: openshift_ansible_test_{{ scenario | default(100000000000000 | random) }}
+
+ - name: start container
+ docker_container:
+ name: "{{ container_name }}"
+ image: "{{ lookup('env', 'IMAGE_PREFIX') | default('openshift-ansible-integration-', true) }}{{ image | default('test-target-base') }}"
+ command: "{{ command | default('sleep 1800') }}"
+ recreate: yes
+ # NOTE: When/if we need to run containers that are docker hosts as well:
+ # volumes: [ "/var/run/docker.sock:/var/run/docker.sock:z" ]
+
+ - name: add container as host in inventory
+ add_host:
+ ansible_connection: docker
+ name: "{{ container_name }}"
+ groups: '{{ l_groups | default("masters,nodes,etcd") }}'
+
+ # There ought to be a better way to transfer the host vars, but see:
+ # https://groups.google.com/forum/#!topic/Ansible-project/Jwx8RYhqxPA
+ - name: set host facts per test parameters
+ set_fact:
+ "{{ item.key }}": "{{ item.value }}"
+ delegate_facts: True
+ delegate_to: "{{ container_name }}"
+ with_dict: "{{ l_host_vars | default({}) }}"
+
+- include: ../../../playbooks/byo/openshift-cluster/initialize_groups.yml
+- include: ../../../playbooks/common/openshift-cluster/evaluate_groups.yml
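
For reference, a hypothetical include that exercises every optional parameter listed in the header comment above; the scenario name, command, and group list are illustrative values rather than defaults used by the existing playbooks:

```
- include: ../../setup_container.yml
  vars:
    scenario: my_new_check               # fixed container name suffix (hypothetical)
    image: preflight-aos-package-checks
    command: sleep 900
    l_groups: masters,nodes
    l_host_vars:
      openshift_deployment_type: origin
```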
diff --git a/test/integration/openshift_health_checker/teardown_container.yml b/test/integration/openshift_health_checker/teardown_container.yml
new file mode 100644
index 000000000..e84fee1f5
--- /dev/null
+++ b/test/integration/openshift_health_checker/teardown_container.yml
@@ -0,0 +1,24 @@
+---
+
+# Include this to delete the current test host container.
+#
+# In order to recover from test exceptions, this cleanup is expected to
+# be done in an "always:" task on the same block as the test task(s). So
+# it happens in a task "on" the host being tested. In order to delete the
+# host's container, the task uses its own hostname (which is same as the
+# container name) but delegates the docker action to localhost.
+
+- block:
+
+ # so handlers don't break the test by trying to run after teardown:
+ - meta: flush_handlers
+
+ always:
+
+ - name: delete test container
+ delegate_to: localhost
+ connection: local
+ docker_container:
+ name: "{{ inventory_hostname }}"
+ state: absent
+ failed_when: False
diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh
new file mode 100755
index 000000000..680b64602
--- /dev/null
+++ b/test/integration/run-tests.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# This script runs the golang integration tests in the directories underneath.
+# It should be run from the directory it lives in, or from a directory above it.
+# Specify the same image prefix (if any) that was used with build-images.sh.
+#
+# Example:
+# ./run-tests.sh --prefix=docker.io/openshift/ansible-integration- --parallel=16
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source_root=$(dirname "${0}")
+
+prefix="${PREFIX:-openshift-ansible-integration-}"
+gotest_options="${GOTEST_OPTIONS:--v}"
+push=false
+verbose=false
+help=false
+
+for args in "$@"
+do
+ case $args in
+ --prefix=*)
+ prefix="${args#*=}"
+ ;;
+ --parallel=*)
+ gotest_options="${gotest_options} -parallel ${args#*=}"
+ ;;
+ --verbose)
+ verbose=true
+ ;;
+ --help)
+ help=true
+ ;;
+ esac
+done
+
+if [ "$help" = true ]; then
+ echo "Runs the openshift-ansible integration tests."
+ echo
+ echo "Options: "
+ echo " --prefix=PREFIX"
+ echo " The prefix to use for the image names."
+ echo " default: openshift-ansible-integration-"
+ echo
+ echo " --parallel=NUMBER"
+ echo " Number of tests to run in parallel."
+ echo " default: GOMAXPROCS (typically, number of processors)"
+ echo
+ echo " --verbose"
+ echo " Enables printing of the commands as they run."
+ echo
+ echo " --help"
+ echo " Prints this help message"
+ echo
+ exit 0
+fi
+
+
+
+if ! [ -d $source_root/../../.tox/integration ]; then
+ # have tox create a consistent virtualenv
+ pushd $source_root/../..; tox -e integration; popd
+fi
+# use the virtualenv from tox
+set +o nounset; source $source_root/../../.tox/integration/bin/activate; set -o nounset
+
+if [ "$verbose" = true ]; then
+ set -x
+fi
+
+# Run the tests. NOTE: "go test" requires a relative path for this purpose.
+# The PWD trick below will only work if cwd is in/above where this script lives.
+retval=0
+IMAGE_PREFIX="${prefix}" env -u GOPATH \
+ go test ./${source_root#$PWD}/... ${gotest_options}
+
+
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py
index 52e9a9888..393a4d6ba 100644
--- a/test/openshift_version_tests.py
+++ b/test/openshift_version_tests.py
@@ -44,7 +44,7 @@ class OpenShiftVersionTests(unittest.TestCase):
{'name': 'oo_version_gte_3_5_or_1_5',
'positive_enterprise_version': '3.6.0',
'negative_enterprise_version': '3.4.0',
- 'positive_origin_version': '1.6.0',
+ 'positive_origin_version': '3.6.0',
'negative_origin_version': '1.4.0'}]
def test_legacy_gte_filters(self):
diff --git a/test/modify_yaml_tests.py b/test/unit/modify_yaml_tests.py
index 0dc25df82..65b2db44c 100644
--- a/test/modify_yaml_tests.py
+++ b/test/unit/modify_yaml_tests.py
@@ -5,7 +5,7 @@ import os
import sys
import unittest
-sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../library/")] + sys.path
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../../library/")] + sys.path
# pylint: disable=import-error
from modify_yaml import set_key # noqa: E402
diff --git a/tox.ini b/tox.ini
index 643fa774d..cc17377ea 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,6 +3,7 @@ minversion=2.3.1
envlist =
py{27,35}-{flake8,pylint,unit}
py27-{yamllint,ansible_syntax,generate_validation}
+ integration
skipsdist=True
skip_missing_interpreters=True
@@ -11,7 +12,8 @@ skip_install=True
deps =
-rrequirements.txt
-rtest-requirements.txt
- py35-flake8: flake8-bugbear
+ py35-flake8: flake8-bugbear==17.3.0
+ integration: docker-py==1.10.6
commands =
unit: pip install -e utils
@@ -21,4 +23,5 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
# TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- ansible_syntax: ansible-playbook --syntax-check playbooks/byo/config.yml
+ ansible_syntax: python setup.py ansible_syntax
+ integration: python -c 'print "run test/integration/run-tests.sh"'
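
With the tox changes above, the new environments can presumably be driven like this; the integration env only prepares the virtualenv and prints a reminder, while the tests themselves run through the shell script:

```
tox -e py27-ansible_syntax      # runs: python setup.py ansible_syntax
tox -e integration              # creates the virtualenv reused by run-tests.sh
./test/integration/run-tests.sh
```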
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
index 3425e7e62..f7e6fe2ff 100644
--- a/utils/etc/ansible.cfg
+++ b/utils/etc/ansible.cfg
@@ -28,3 +28,9 @@ deprecation_warnings = False
# remote_tmp - set if provided by user (cli)
# ssh_args - set if provided by user (cli)
# control_path
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index c881e4b92..433e29dde 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -7,6 +7,12 @@ import yaml
from ansible.plugins.callback import CallbackBase
from ansible.parsing.yaml.dumper import AnsibleDumper
+# ansible.compat.six goes away with Ansible 2.4
+try:
+ from ansible.compat.six import u
+except ImportError:
+ from ansible.module_utils.six import u
+
# pylint: disable=super-init-not-called
class CallbackModule(CallbackBase):
@@ -39,10 +45,10 @@ class CallbackModule(CallbackBase):
facts = abridged_result['result']['ansible_facts']['openshift']
hosts_yaml = {}
hosts_yaml[res._host.get_name()] = facts
- to_dump = yaml.dump(hosts_yaml,
- allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper)
+ to_dump = u(yaml.dump(hosts_yaml,
+ allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper))
os.write(self.hosts_yaml, to_dump)
def v2_runner_on_skipped(self, res):
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index a6d784dea..71dcf87aa 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -34,6 +34,12 @@ UPGRADE_MAPPINGS = {
'3.5': {
'minor_version': '3.5',
'minor_playbook': 'v3_5/upgrade.yml',
+ 'major_playbook': 'v3_6/upgrade.yml',
+ 'major_version': '3.6',
+ },
+ '3.6': {
+ 'minor_version': '3.6',
+ 'minor_playbook': 'v3_6/upgrade.yml',
},
}
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index f25266f29..1574d447a 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -39,18 +39,19 @@ class Variant(object):
# WARNING: Keep the versions ordered, most recent first:
OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
- Version('3.5', 'openshift-enterprise'),
+ Version('3.6', 'openshift-enterprise'),
])
REG = Variant('openshift-enterprise', 'Registry', [
- Version('3.4', 'openshift-enterprise', 'registry'),
+ Version('3.6', 'openshift-enterprise', 'registry'),
])
origin = Variant('origin', 'OpenShift Origin', [
- Version('1.4', 'origin'),
+ Version('3.6', 'origin'),
])
LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.5', 'openshift-enterprise'),
Version('3.4', 'openshift-enterprise'),
Version('3.3', 'openshift-enterprise'),
Version('3.2', 'openshift-enterprise'),