-rw-r--r--  .gitignore | 1
-rw-r--r--  .tito/packages/.readme | 3
-rw-r--r--  .tito/packages/openshift-ansible | 1
-rw-r--r--  .tito/packages/openshift-ansible-bin | 1
-rw-r--r--  .tito/packages/openshift-ansible-inventory | 1
-rw-r--r--  .tito/releasers.conf | 13
-rw-r--r--  .tito/tito.props | 5
-rw-r--r--  README.md | 4
-rw-r--r--  README_AEP.md | 37
-rw-r--r--  README_AWS.md | 4
-rw-r--r--  README_OSE.md | 3
-rw-r--r--  README_vagrant.md | 1
-rw-r--r--  bin/README_SHELL_COMPLETION | 2
-rw-r--r--  bin/openshift-ansible-bin.spec | 122
-rw-r--r--  bin/openshift_ansible.conf.example | 2
-rw-r--r--  bin/openshift_ansible/awsutil.py | 11
l---------  bin/openshift_ansible/multi_ec2.py | 1
l---------  bin/openshift_ansible/multi_inventory.py | 1
-rwxr-xr-x  bin/ossh_bash_completion | 20
-rw-r--r--  bin/ossh_zsh_completion | 10
-rw-r--r--  bin/zsh_functions/_ossh | 4
-rw-r--r--  filter_plugins/oo_filters.py | 80
-rw-r--r--  filter_plugins/oo_zabbix_filters.py | 51
-rwxr-xr-x  git/pylint.sh | 2
-rw-r--r--  inventory/byo/hosts.example | 76
-rwxr-xr-x  inventory/gce/hosts/gce.py | 32
-rw-r--r--  inventory/multi_ec2.yaml.example | 32
-rwxr-xr-x  inventory/multi_inventory.py (renamed from inventory/multi_ec2.py) | 154
-rw-r--r--  inventory/multi_inventory.yaml.example | 51
-rw-r--r--  inventory/openshift-ansible-inventory.spec | 108
-rw-r--r--  openshift-ansible.spec | 542
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 17
-rw-r--r--  playbooks/adhoc/uninstall.yml | 38
l---------  playbooks/adhoc/upgrades/filter_plugins | 1
l---------  playbooks/adhoc/upgrades/lookup_plugins | 1
l---------  playbooks/adhoc/upgrades/roles | 1
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md | 8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md (renamed from playbooks/adhoc/upgrades/README.md) | 10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md | 17
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 67
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 76
-rw-r--r--  playbooks/common/openshift-cluster/scaleup.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check | 188
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/versions.sh | 10
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins | 1
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 154
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/roles | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml (renamed from playbooks/adhoc/upgrades/upgrade.yml) | 28
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 429
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 115
-rw-r--r--  playbooks/common/openshift-node/config.yml | 78
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/gce/openshift-cluster/join_node.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 8
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 6
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 1
-rw-r--r--  roles/etcd/README.md | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 8
-rw-r--r--  roles/etcd/handlers/main.yml | 1
-rw-r--r--  roles/etcd/meta/main.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 12
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 4
-rw-r--r--  roles/etcd_ca/meta/main.yml | 2
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 30
-rw-r--r--  roles/etcd_ca/templates/openssl_append.j2 | 30
-rw-r--r--  roles/etcd_ca/vars/main.yml | 3
-rw-r--r--  roles/etcd_certificates/tasks/client.yml | 2
-rw-r--r--  roles/etcd_certificates/tasks/main.yml | 3
-rw-r--r--  roles/etcd_certificates/tasks/server.yml | 10
-rw-r--r--  roles/etcd_certificates/vars/main.yml | 11
-rw-r--r--  roles/etcd_common/README.md | 34
-rw-r--r--  roles/etcd_common/defaults/main.yml | 30
-rw-r--r--  roles/etcd_common/meta/main.yml | 16
-rw-r--r--  roles/etcd_common/tasks/main.yml | 13
-rw-r--r--  roles/etcd_common/templates/host_int_map.j2 | 13
-rw-r--r--  roles/flannel/README.md | 45
-rw-r--r--  roles/flannel/defaults/main.yaml | 8
-rw-r--r--  roles/flannel/handlers/main.yml | 8
-rw-r--r--  roles/flannel/meta/main.yml | 16
-rw-r--r--  roles/flannel/tasks/main.yml | 43
-rw-r--r--  roles/flannel_register/README.md | 47
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 11
-rw-r--r--  roles/flannel_register/meta/main.yml | 16
-rw-r--r--  roles/flannel_register/tasks/main.yml | 14
-rw-r--r--  roles/flannel_register/templates/flannel-config.json | 8
-rw-r--r--  roles/haproxy/README.md | 34
-rw-r--r--  roles/haproxy/defaults/main.yml | 21
-rw-r--r--  roles/haproxy/handlers/main.yml | 5
-rw-r--r--  roles/haproxy/meta/main.yml | 14
-rw-r--r--  roles/haproxy/tasks/main.yml | 25
-rw-r--r--  roles/haproxy/templates/haproxy.cfg.j2 | 76
-rw-r--r--  roles/kube_nfs_volumes/README.md | 3
-rw-r--r--  roles/kube_nfs_volumes/defaults/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 13
l---------  roles/kube_nfs_volumes/templates/v1/nfs.json.j2 | 1
-rw-r--r--  roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 (renamed from roles/kube_nfs_volumes/templates/nfs.json.j2) | 0
-rw-r--r--  roles/lib_zabbix/library/zbx_item.py | 37
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py | 39
-rw-r--r--  roles/lib_zabbix/library/zbx_itservice.py | 263
-rw-r--r--  roles/lib_zabbix/library/zbx_trigger.py | 5
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 6
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 23
-rw-r--r--  roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 | 26
-rw-r--r--  roles/openshift_common/tasks/main.yml | 5
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 15
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json | 225
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json | 211
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml | 151
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml | 116
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml | 151
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml | 116
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/cakephp.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/dancer.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/nodejs.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json | 63
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json | 325
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json) | 160
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/amq6.json) | 171
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json) | 223
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json) | 216
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json) | 133
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json) | 153
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json) | 208
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json) | 191
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json) | 201
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json) | 194
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json) | 200
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json) | 185
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json) | 90
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json) | 110
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json) | 168
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json) | 188
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json) | 171
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json) | 191
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json) | 162
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json) | 145
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json) | 90
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json) | 110
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json) | 191
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json) | 151
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json) | 194
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json) | 154
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json) | 185
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json) | 180
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 66
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 214
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 5
-rw-r--r--  roles/openshift_master/handlers/main.yml | 12
-rw-r--r--  roles/openshift_master/tasks/main.yml | 126
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.j2 | 9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.service.j2 | 21
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.j2 | 9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 | 22
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 53
-rw-r--r--  roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 | 7
-rw-r--r--  roles/openshift_master/vars/main.yml | 1
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 5
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 23
-rw-r--r--  roles/openshift_master_cluster/tasks/configure_deferred.yml | 8
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 5
-rw-r--r--  roles/openshift_node/handlers/main.yml | 1
-rw-r--r--  roles/openshift_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 30
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 5
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 12
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/main.yml | 13
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 7
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 2
-rw-r--r--  roles/os_zabbix/tasks/main.yml | 18
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_agent.yml | 4
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_server.yml | 60
-rw-r--r--  roles/os_zabbix/vars/template_aws.yml | 25
-rw-r--r--  roles/os_zabbix/vars/template_docker.yml | 5
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 82
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 52
-rw-r--r--  roles/os_zabbix/vars/template_performance_copilot.yml | 14
-rw-r--r--  test/units/README.md | 2
-rwxr-xr-x  test/units/multi_inventory_test.py | 114
-rwxr-xr-x  test/units/mutli_ec2_test.py | 95
-rw-r--r--  utils/.gitignore | 45
-rw-r--r--  utils/README.txt | 24
-rw-r--r--  utils/docs/config.md | 80
-rw-r--r--  utils/etc/ansible.cfg | 25
-rw-r--r--  utils/setup.cfg | 5
-rw-r--r--  utils/setup.py | 85
-rwxr-xr-x  utils/site_assets/oo-install-bootstrap.sh | 86
-rw-r--r--  utils/site_assets/oo_install_launcher.README.txt | 22
-rw-r--r--  utils/src/DESCRIPTION.rst | 13
-rw-r--r--  utils/src/MANIFEST.in | 9
-rw-r--r--  utils/src/data/data_file | 1
-rw-r--r--  utils/src/ooinstall/__init__.py | 5
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 88
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 606
-rw-r--r--  utils/src/ooinstall/oo_config.py | 218
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 179
-rw-r--r--  utils/src/ooinstall/variants.py | 77
-rw-r--r--  utils/test/__init__.py | 0
-rw-r--r--  utils/test/cli_installer_tests.py | 629
-rw-r--r--  utils/test/oo_config_tests.py | 228
-rw-r--r--  utils/workflows/enterprise_deploy/openshift.sh | 2
226 files changed, 10961 insertions, 2832 deletions
diff --git a/.gitignore b/.gitignore
index cacc711a1..8f46c269f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,4 +15,5 @@
.DS_Store
gce.ini
multi_ec2.yaml
+multi_inventory.yaml
.vagrant
diff --git a/.tito/packages/.readme b/.tito/packages/.readme
new file mode 100644
index 000000000..b9411e2d1
--- /dev/null
+++ b/.tito/packages/.readme
@@ -0,0 +1,3 @@
+the .tito/packages directory contains metadata files
+named after their packages. Each file has the latest tagged
+version and the project's relative directory.
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
new file mode 100644
index 000000000..c2f5784ce
--- /dev/null
+++ b/.tito/packages/openshift-ansible
@@ -0,0 +1 @@
+3.0.12-1 ./
diff --git a/.tito/packages/openshift-ansible-bin b/.tito/packages/openshift-ansible-bin
new file mode 100644
index 000000000..5275dfcf9
--- /dev/null
+++ b/.tito/packages/openshift-ansible-bin
@@ -0,0 +1 @@
+0.0.21-1 bin/
diff --git a/.tito/packages/openshift-ansible-inventory b/.tito/packages/openshift-ansible-inventory
new file mode 100644
index 000000000..85502438a
--- /dev/null
+++ b/.tito/packages/openshift-ansible-inventory
@@ -0,0 +1 @@
+0.0.11-1 inventory/
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
new file mode 100644
index 000000000..f863ce9b1
--- /dev/null
+++ b/.tito/releasers.conf
@@ -0,0 +1,13 @@
+[brew]
+releaser = tito.release.DistGitReleaser
+branches = libra-rhel-7
+
+[ose-3.0]
+releaser = tito.release.DistGitReleaser
+branches = rhose-3.0-rhel-7
+srpm_disttag = .el7ose
+
+[aos-3.1]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.1-rhel-7
+srpm_disttag = .el7aos
diff --git a/.tito/tito.props b/.tito/tito.props
new file mode 100644
index 000000000..eab3f190d
--- /dev/null
+++ b/.tito/tito.props
@@ -0,0 +1,5 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)
diff --git a/README.md b/README.md
index 489f9b8e9..635df36a0 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-#Openshift and Atomic Enterprise Ansible
+#OpenShift and Atomic Enterprise Ansible
-This repo contains Ansible code for Openshift and Atomic Enterprise.
+This repo contains Ansible code for OpenShift and Atomic Enterprise.
##Setup
- Install base dependencies:
diff --git a/README_AEP.md b/README_AEP.md
index e29888617..83e575ebe 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -76,39 +76,30 @@ ansible_ssh_user=root
# If ansible_ssh_user is not root, ansible_sudo must be set to true
#ansible_sudo=true
-# To deploy origin, change deployment_type to origin
-deployment_type=enterprise
+# See DEPLOYMENT_TYPES.md
+deployment_type=atomic-enterprise
-# Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+# Pre-release registry URL; note that in the future these images
+# may have an atomicenterprise/aep- prefix or so.
+oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
-'baseurl':
-'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
-'enabled': 1, 'gpgcheck': 0}]
-
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
-'OpenShift Origin COPR', 'baseurl':
-'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
-'enabled': 1, 'gpgcheck': 1, gpgkey:
-'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
# host group for masters
[masters]
-ose3-master.example.com
+aep3-master.example.com
# host group for nodes
[nodes]
-ose3-node[1:2].example.com
+aep3-node[1:2].example.com
```
The hostnames above should resolve both from the hosts themselves and
the host where ansible is running (if different).
## Running the ansible playbooks
-From the atomic-enterprise-ansible checkout run:
+From the openshift-ansible checkout run:
```sh
ansible-playbook playbooks/byo/config.yml
```
@@ -120,16 +111,18 @@ inventory file use the -i option for ansible-playbook.
On the master host:
```sh
oadm router --create=true \
- --credentials=/etc/openshift/master/openshift-router.kubeconfig \
- --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}'
+ --service-account=router \
+ --credentials=/etc/origin/master/openshift-router.kubeconfig \
+ --images='rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}'
```
#### Create the default docker-registry
On the master host:
```sh
oadm registry --create=true \
- --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
- --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' \
+ --service-account=registry \
+ --credentials=/etc/origin/master/openshift-registry.kubeconfig \
+ --images='rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}' \
--mount-host=/var/lib/openshift/docker-registry
```
diff --git a/README_AWS.md b/README_AWS.md
index 3a5790eb3..6757e2892 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -38,8 +38,8 @@ You may also want to allow access from the outside world on the following ports:
• 80 - Web Apps
• 443 - Web Apps (https)
• 4789 - SDN / VXLAN
-• 8443 - Openshift Console
-• 10250 - kubelet
+• 8443 - OpenShift Console
+• 10250 - kubelet
```
diff --git a/README_OSE.md b/README_OSE.md
index 79ad07044..524950d51 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -79,9 +79,6 @@ ansible_ssh_user=root
# To deploy origin, change deployment_type to origin
deployment_type=enterprise
-# Pre-release registry URL
-oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
-
# Pre-release additional repo
openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
'baseurl':
diff --git a/README_vagrant.md b/README_vagrant.md
index 8e9946dc2..73fd31476 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -1,5 +1,6 @@
Requirements
------------
+- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient).
- vagrant (tested against version 1.7.2)
- vagrant-hostmanager plugin (tested against version 1.5.0)
- vagrant-libvirt (tested against version 0.0.26)
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
index 5f05df7fc..49bba3acc 100644
--- a/bin/README_SHELL_COMPLETION
+++ b/bin/README_SHELL_COMPLETION
@@ -14,7 +14,7 @@ will populate the cache file and the completions should
become available.
This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
+multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
It will then parse a few {host}.{env} out of the json
and return them to be completable.
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
deleted file mode 100644
index d90810bc3..000000000
--- a/bin/openshift-ansible-bin.spec
+++ /dev/null
@@ -1,122 +0,0 @@
-Summary: OpenShift Ansible Scripts for working with metadata hosts
-Name: openshift-ansible-bin
-Version: 0.0.19
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2, openshift-ansible-inventory
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Scripts to make it nicer when working with hosts that are defined only by metadata.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}%{_bindir}
-mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
-mkdir -p %{buildroot}/etc/bash_completion.d
-mkdir -p %{buildroot}/etc/openshift_ansible
-
-cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
-
-# Make it so we can load multi_ec2.py as a library.
-rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
-ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
-ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
-
-cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
-
-cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
-
-%files
-%{_bindir}/*
-%{python_sitelib}/openshift_ansible/
-/etc/bash_completion.d/*
-%config(noreplace) /etc/openshift_ansible/
-
-%changelog
-* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.19-1
-- Updated to show private ips when doing a list (kwoodson@redhat.com)
-- Updated to read config first and default to users home dir
- (kwoodson@redhat.com)
-- Prevent Ansible from serializing tasks (lhuard@amadeus.com)
-- Infra node support (whearn@redhat.com)
-- Playbook updates for clustered etcd (jdetiber@redhat.com)
-- bin/cluster supports boto credentials as well as env variables
- (jdetiber@redhat.com)
-- Merge pull request #291 from lhuard1A/profile
- (twiest@users.noreply.github.com)
-- Add a generic mechanism for passing options (lhuard@amadeus.com)
-- Infrastructure - Validate AWS environment before calling playbooks
- (jhonce@redhat.com)
-- Add a --profile option to spot which task takes more time
- (lhuard@amadeus.com)
-- changed Openshift to OpenShift (twiest@redhat.com)
-
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * Update defaults and examples to track core concepts guide
- (jhonce@redhat.com)
-- Issue 119 - Add support for ~/.openshift-ansible (jhonce@redhat.com)
-- Infrastructure - Add service action to bin/cluster (jhonce@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
-- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
-- Adding cache location for multi ec2 (kwoodson@redhat.com)
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
-- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
-
-* Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
-- fixed opssh and opscp to allow just environment or just host-type.
- (twiest@redhat.com)
-
-* Mon May 04 2015 Thomas Wiest <twiest@redhat.com> 0.0.11-1
-- changed opssh to a bash script using ohi to make it easier to maintain, and
- to expose all of the pssh features directly. (twiest@redhat.com)
-- Added --user option to ohi to pre-pend the username in the hostlist output.
- (twiest@redhat.com)
-- Added utils.py that contains a normalize_dnsname function good for sorting
- dns names to a human readable list. (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.10-1
-- added --list-host-types option to opscp (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.9-1
-- added opscp (twiest@redhat.com)
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
-- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
-
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
-- added the ability to run opssh and ohi on all hosts in an environment, as
- well as all hosts of the same host-type regardless of environment
- (twiest@redhat.com)
-- added ohi (twiest@redhat.com)
-* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- fixed bug where opssh would throw an exception if pssh returned a non-zero
- exit code (twiest@redhat.com)
-
-* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- fixed the opssh default output behavior to be consistent with pssh. Also
- fixed a bug in how directories are named for --outdir and --errdir.
- (twiest@redhat.com)
-* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- created a python package named openshift_ansible (twiest@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
index e891b855a..8786dfc13 100644
--- a/bin/openshift_ansible.conf.example
+++ b/bin/openshift_ansible.conf.example
@@ -1,5 +1,5 @@
#[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
+#inventory = /usr/share/ansible/inventory/multi_inventory.py
#[host_type_aliases]
#host-type-one = aliasa,aliasb
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 9df034f57..45345007c 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -4,7 +4,10 @@
import os
import re
-from openshift_ansible import multi_ec2
+
+# Buildbot does not have multi_inventory installed
+#pylint: disable=no-name-in-module
+from openshift_ansible import multi_inventory
class ArgumentError(Exception):
"""This class is raised when improper arguments are passed."""
@@ -49,9 +52,9 @@ class AwsUtil(object):
Keyword arguments:
args -- optional arguments to pass to the inventory script
"""
- mec2 = multi_ec2.MultiEc2(args)
- mec2.run()
- return mec2.result
+ minv = multi_inventory.MultiInventory(args)
+ minv.run()
+ return minv.result
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py
deleted file mode 120000
index 660a0418e..000000000
--- a/bin/openshift_ansible/multi_ec2.py
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/multi_ec2.py
\ No newline at end of file
diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py
new file mode 120000
index 000000000..b40feec07
--- /dev/null
+++ b/bin/openshift_ansible/multi_inventory.py
@@ -0,0 +1 @@
+../../inventory/multi_inventory.py
\ No newline at end of file
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 5072161f0..997ff0f9c 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
fi
}
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
__opssh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
fi
}
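Each branch of the completion helper above performs the same operation: load the renamed multi_inventory cache and emit one `<name>.<environment>` entry per host. Expanded into plain Python for readability, using the same cache path and EC2 tag keys as the script:

```python
# Expanded form of the completion one-liners above; reads the renamed
# multi_inventory cache and prints "<name>.<environment>" per host.
import json
import os

cache = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
with open(cache) as handle:
    data = json.load(handle)

names = ['%s.%s' % (host['ec2_tag_Name'], host['ec2_tag_environment'])
         for host in data['_meta']['hostvars'].values()
         if 'ec2_tag_Name' in host and 'ec2_tag_environment' in host]
print('\n'.join(names))
```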
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 44500c618..3c4018636 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index 7c6cb7b0b..d205e1055 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -1,8 +1,8 @@
#compdef ossh oscp
_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
fi
}
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index a57b0f895..9a17913c4 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
from ansible import errors
from operator import itemgetter
+import OpenSSL.crypto
+import os.path
import pdb
import re
import json
@@ -241,6 +243,21 @@ class FilterModule(object):
return string.split(separator)
@staticmethod
+ def oo_haproxy_backend_masters(hosts):
+ ''' This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ '''
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server_port = host_info['openshift']['master']['api_port']
+ server['address'] = "%s:%s" % (server_ip, server_port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
+
+ @staticmethod
def oo_filter_list(data, filter_attr=None):
''' This returns a list, which contains all items where filter_attr
evaluates to true
@@ -258,7 +275,7 @@ class FilterModule(object):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str")
# Gather up the values for the list of keys passed in
- return [x for x in data if x[filter_attr]]
+ return [x for x in data if x.has_key(filter_attr) and x[filter_attr]]
@staticmethod
def oo_parse_heat_stack_outputs(data):
@@ -327,6 +344,63 @@ class FilterModule(object):
return revamped_outputs
+ @staticmethod
+ # pylint: disable=too-many-branches
+ def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+ ''' Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key" }]
+
+ returns [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/custom2.crt",
+ "keyfile": "/etc/origin/master/custom2.key",
+ "names": [ "some-hostname.com" ] }]
+ '''
+ if not issubclass(type(certificates), list):
+ raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+ if not issubclass(type(data_dir), unicode):
+ raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+ if not issubclass(type(internal_hostnames), list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
+
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ certificate['names'] = list(set(certificate['names']))
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+ return certificates
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -342,5 +416,7 @@ class FilterModule(object):
"oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
- "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+ "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+ "oo_parse_certificate_names": self.oo_parse_certificate_names,
+ "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
}
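The new oo_haproxy_backend_masters filter feeds the haproxy role's backend list from master host facts. A small sketch of the transformation it performs, using made-up host facts:

```python
# Sample input/output for the oo_haproxy_backend_masters filter added above;
# the host facts below are made up for illustration.
hosts = [
    {'openshift': {'common': {'ip': '10.0.0.10'}, 'master': {'api_port': '8443'}}},
    {'openshift': {'common': {'ip': '10.0.0.11'}, 'master': {'api_port': '8443'}}},
]

servers = []
for idx, host_info in enumerate(hosts):
    servers.append({
        'name': 'master%s' % idx,
        'address': '%s:%s' % (host_info['openshift']['common']['ip'],
                              host_info['openshift']['master']['api_port']),
        'opts': 'check',
    })

# servers == [{'name': 'master0', 'address': '10.0.0.10:8443', 'opts': 'check'},
#             {'name': 'master1', 'address': '10.0.0.11:8443', 'opts': 'check'}]
print(servers)
```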
diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py
index c44b874e8..fcfe43777 100644
--- a/filter_plugins/oo_zabbix_filters.py
+++ b/filter_plugins/oo_zabbix_filters.py
@@ -95,6 +95,54 @@ class FilterModule(object):
return data
+ @staticmethod
+ def itservice_results_builder(data, clusters, keys):
+ '''Take a list of dict results,
+ loop through each results and create a hash
+ of:
+ [{clusterid: cluster1, key: 111 }]
+ '''
+ r_list = []
+ for cluster in clusters:
+ for results in data:
+ if cluster == results['item'][0]:
+ results = results['results']
+ if results and len(results) > 0 and all([results[0].has_key(_key) for _key in keys]):
+ tmp = {}
+ tmp['clusterid'] = cluster
+ for key in keys:
+ tmp[key] = results[0][key]
+ r_list.append(tmp)
+
+ return r_list
+
+ @staticmethod
+ def itservice_dependency_builder(data, cluster):
+ '''Take a list of dict results,
+ loop through each results and create a hash
+ of:
+ [{clusterid: cluster1, key: 111 }]
+ '''
+ r_list = []
+ for dep in data:
+ if cluster == dep['clusterid']:
+ r_list.append({'name': '%s - %s' % (dep['clusterid'], dep['description']), 'dep_type': 'hard'})
+
+ return r_list
+
+ @staticmethod
+ def itservice_dep_builder_list(data):
+ '''Take a list of dict results,
+ loop through each results and create a hash
+ of:
+ [{clusterid: cluster1, key: 111 }]
+ '''
+ r_list = []
+ for dep in data:
+ r_list.append({'name': '%s' % dep, 'dep_type': 'hard'})
+
+ return r_list
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -105,4 +153,7 @@ class FilterModule(object):
"create_data": self.create_data,
"oo_build_zabbix_collect": self.oo_build_zabbix_collect,
"oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict,
+ "itservice_results_builder": self.itservice_results_builder,
+ "itservice_dependency_builder": self.itservice_dependency_builder,
+ "itservice_dep_builder_list": self.itservice_dep_builder_list,
}
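itservice_dependency_builder pairs a cluster id with trigger descriptions to produce hard dependencies for the new zbx_itservice module. Illustrated with hypothetical data:

```python
# Illustration of itservice_dependency_builder from the filter above,
# using hypothetical cluster/trigger data.
data = [
    {'clusterid': 'cluster1', 'description': 'etcd status'},
    {'clusterid': 'cluster2', 'description': 'api status'},
]
cluster = 'cluster1'

deps = []
for dep in data:
    if cluster == dep['clusterid']:
        deps.append({'name': '%s - %s' % (dep['clusterid'], dep['description']),
                     'dep_type': 'hard'})

# deps == [{'name': 'cluster1 - etcd status', 'dep_type': 'hard'}]
print(deps)
```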
diff --git a/git/pylint.sh b/git/pylint.sh
index 55e8b6131..f29c055dc 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -40,6 +40,8 @@ for PY_FILE in $PY_DIFF; do
fi
done
+export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
+
if [ "${FILES_TO_TEST}" != "" ]; then
echo "Testing files: ${FILES_TO_TEST}"
exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 6b366cf87..56bbb9612 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -5,6 +5,7 @@
masters
nodes
etcd
+lb
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
@@ -24,7 +25,7 @@ deployment_type=atomic-enterprise
#use_cluster_metrics=true
# Pre-release registry URL
-#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+#oreg_url=example.com/openshift3/ose-${component}:${version}
# Pre-release Dev puddle repo
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
@@ -41,6 +42,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
# Configure Fluentd
#use_fluentd=true
@@ -50,21 +61,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']
-# master cluster ha variables using pacemaker or RHEL HA
+# Native high availbility cluster method with optional load balancer.
+# If no lb group is defined installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
#openshift_master_cluster_password=openshift_cluster
#openshift_master_cluster_vip=192.168.133.25
#openshift_master_cluster_public_vip=192.168.133.25
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-# master cluster ha variables when using a different HA solution
-# For installation the value of openshift_master_cluster_hostname must resolve
-# to the first master defined in the inventory.
-# The HA solution must be manually configured after installation and must ensure
-# that the master is running on a single master host.
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_defer_ha=True
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
# default subdomain to use for exposed routes
#osm_default_subdomain=apps.test.example.com
@@ -75,13 +94,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default project node selector
#osm_default_node_selector='region=primary'
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
# default selectors for router and registry services
# openshift_router_selector='region=infra'
# openshift_registry_selector='region=infra'
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_encryption_secrets must be equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
@@ -89,6 +142,9 @@ ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
+[lb]
+ose3-lb-ansible.test.example.com
+
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
# make them unschedulable by adding openshift_scheduleable=False any node that's also a master.
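The session options documented above require openshift_master_session_encryption_secrets entries of exactly 16, 24, or 32 characters (AES-128/192/256) and recommend 32- or 64-byte signing secrets. A hypothetical helper, not part of this repository, for generating values of the right length:

```python
# Hypothetical secret generator for the inventory options above:
# base64 of 48 random bytes is exactly 64 characters (signing secret),
# base64 of 24 random bytes is exactly 32 characters (AES-256 encryption secret).
import base64
import os

auth_secret = base64.b64encode(os.urandom(48)).decode('ascii')
encryption_secret = base64.b64encode(os.urandom(24)).decode('ascii')

print("openshift_master_session_auth_secrets=['%s']" % auth_secret)
print("openshift_master_session_encryption_secrets=['%s']" % encryption_secret)
```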
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 6ed12e011..99746cdbf 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -66,12 +66,22 @@ Examples:
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
- $ plugins/inventory/gce.py --host my_instance
+ $ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
+__requires__ = ['pycrypto>=2.6']
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
@@ -102,9 +112,9 @@ class GceInventory(object):
# Just display data for specific host
if self.args.host:
- print self.json_format_dict(self.node_to_dict(
+ print(self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)),
- pretty=self.args.pretty)
+ pretty=self.args.pretty))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
@@ -120,7 +130,6 @@ class GceInventory(object):
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
@@ -174,7 +183,6 @@ class GceInventory(object):
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
-
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
@@ -213,8 +221,7 @@ class GceInventory(object):
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
- # Hosts don't always have a public IP name
- #'gce_public_ip': inst.public_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
@@ -222,15 +229,15 @@ class GceInventory(object):
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
- # Hosts don't always have a public IP name
- #'ansible_ssh_host': inst.public_ips[0]
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
}
def get_instance(self, instance_name):
'''Gets details about a specific instance '''
try:
return self.driver.ex_get_node(instance_name)
- except Exception, e:
+ except Exception as e:
return None
def group_instances(self):
@@ -250,7 +257,10 @@ class GceInventory(object):
tags = node.extra['tags']
for t in tags:
- tag = 'tag_%s' % t
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
else: groups[tag] = [name]
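The gce.py grouping change above gives tags with a `group-` prefix their own inventory group instead of the generic `tag_` namespace. The effect, with hypothetical tag names:

```python
# Behaviour of the tag-to-group change above: a 'group-' prefix maps the
# instance straight into that group; other tags still land in 'tag_<name>'.
groups = {}
name = 'my-instance'
for t in ['group-masters', 'staging']:
    tag = t[6:] if t.startswith('group-') else 'tag_%s' % t
    groups.setdefault(tag, []).append(name)

# groups == {'masters': ['my-instance'], 'tag_staging': ['my-instance']}
print(groups)
```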
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index 99f157b11..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
- - name: aws1
- provider: aws/hosts/ec2.py
- provider_config:
- ec2:
- regions: all
- regions_exclude: us-gov-west-1,cn-north-1
- destination_variable: public_dns_name
- route53: False
- cache_path: ~/.ansible/tmp
- cache_max_age: 300
- vpc_destination_variable: ip_address
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- all_group: ec2
- hostvars:
- cloud: aws
- account: aws1
-
-- name: aws2
- provider: aws/hosts/ec2.py
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py
index 2cbf33473..354a8c10c 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_inventory.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
'''
- Fetch and combine multiple ec2 account settings into a single
+ Fetch and combine multiple inventory account settings into a single
json hash.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
import fcntl
import tempfile
import copy
+from string import Template
+import shutil
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+ '''Exceptions for MultiInventory class'''
+ pass
+
+class MultiInventory(object):
'''
- MultiEc2 class:
+ MultiInventory class:
Opens a yaml config file and reads aws credentials.
Stores a json hash of resources in result.
'''
@@ -35,7 +41,7 @@ class MultiEc2(object):
self.cache_path = DEFAULT_CACHE_PATH
self.config = None
- self.all_ec2_results = {}
+ self.all_inventory_results = {}
self.result = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@@ -56,7 +62,7 @@ class MultiEc2(object):
cache is valid for the inventory.
if the cache is valid; return cache
- else the credentials are loaded from multi_ec2.yaml or from the env
+ else the credentials are loaded from multi_inventory.yaml or from the env
and we attempt to get the inventory from the provider specified.
'''
# load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
with open(conf_file) as conf:
config = yaml.safe_load(conf)
+ # Provide a check for unique account names
+ if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+ raise MultiInventoryException('Duplicate account names in config file')
+
return config
def get_provider_tags(self, provider, env=None):
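Because downstream results are keyed by account name, a duplicate name would silently overwrite an earlier account's inventory, which is what the new MultiInventoryException guards against. A tiny sketch of the same check in isolation:

    class MultiInventoryException(Exception):
        '''Configuration error, mirroring the exception class added above.'''

    def check_unique_account_names(config):
        names = [acc['name'] for acc in config['accounts']]
        if len(set(names)) != len(names):
            raise MultiInventoryException('Duplicate account names in config file')

    check_unique_account_names({'accounts': [{'name': 'aws1'}, {'name': 'gce1'}]})   # passes
    # check_unique_account_names({'accounts': [{'name': 'aws1'}, {'name': 'aws1'}]}) # raises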
@@ -136,23 +146,25 @@ class MultiEc2(object):
else:
cmds.append('--list')
- cmds.append('--refresh-cache')
+ if 'aws' in provider.lower():
+ cmds.append('--refresh-cache')
return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
stdout=subprocess.PIPE, env=env)
@staticmethod
- def generate_config(config_data):
- """Generate the ec2.ini file in as a secure temp file.
- Once generated, pass it to the ec2.py as an environment variable.
+ def generate_config(provider_files):
+ """Generate the provider_files in a temporary directory.
"""
- fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
- for section, values in config_data.items():
- os.write(fildes, "[%s]\n" % section)
- for option, value in values.items():
- os.write(fildes, "%s = %s\n" % (option, value))
- os.close(fildes)
- return tmp_file_path
+ prefix = 'multi_inventory.'
+ tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+ for provider_file in provider_files:
+ filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+ content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+ filedes.write(content)
+ filedes.close()
+
+ return tmp_dir_path
def run_provider(self):
'''Setup the provider call with proper variables
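generate_config() now writes each account's provider_files into a throw-away directory and expands ${tmpdir} so the files can reference each other (gce.ini pointing at priv_key.pem, for instance). A self-contained sketch of that flow with made-up contents:

    import os
    import shutil
    import tempfile
    from string import Template

    def generate_config(provider_files):
        """Write provider_files into a temp directory, expanding ${tmpdir}."""
        tmp_dir_path = tempfile.mkdtemp(prefix='multi_inventory.')
        for provider_file in provider_files:
            content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
            with open(os.path.join(tmp_dir_path, provider_file['name']), 'w') as filedes:
                filedes.write(content)
        return tmp_dir_path

    tmp_dir = generate_config([
        {'name': 'priv_key.pem', 'contents': 'dummy key material\n'},
        {'name': 'gce.ini',
         'contents': '[gce]\ngce_service_account_pem_file_path = ${tmpdir}/priv_key.pem\n'},
    ])
    print(open(os.path.join(tmp_dir, 'gce.ini')).read())
    shutil.rmtree(tmp_dir)  # run_provider() performs the same cleanup in its finally block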
@@ -160,13 +172,21 @@ class MultiEc2(object):
'''
try:
all_results = []
- tmp_file_paths = []
+ tmp_dir_paths = []
processes = {}
for account in self.config['accounts']:
- env = account['env_vars']
- if account.has_key('provider_config'):
- tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
- env['EC2_INI_PATH'] = tmp_file_paths[-1]
+ tmp_dir = None
+ if account.has_key('provider_files'):
+ tmp_dir = MultiInventory.generate_config(account['provider_files'])
+ tmp_dir_paths.append(tmp_dir)
+
+ # Update env vars after creating provider_config_files
+ # so that we can grab the tmp_dir if it exists
+ env = account.get('env_vars', {})
+ if env and tmp_dir:
+ for key, value in env.items():
+ env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
name = account['name']
provider = account['provider']
processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@ class MultiEc2(object):
})
finally:
- # Clean up the mkstemp file
- for tmp_file in tmp_file_paths:
- os.unlink(tmp_file)
+ # Clean up the mkdtemp dirs
+ for tmp_dir in tmp_dir_paths:
+ shutil.rmtree(tmp_dir)
return all_results
@@ -223,7 +243,7 @@ class MultiEc2(object):
]
raise RuntimeError('\n'.join(err_msg).format(**result))
else:
- self.all_ec2_results[result['name']] = json.loads(result['out'])
+ self.all_inventory_results[result['name']] = json.loads(result['out'])
# Check if user wants extra vars in yaml by
# having hostvars and all_group defined
@@ -231,33 +251,65 @@ class MultiEc2(object):
self.apply_account_config(acc_config)
# Build results by merging all dictionaries
- values = self.all_ec2_results.values()
+ values = self.all_inventory_results.values()
values.insert(0, self.result)
for result in values:
- MultiEc2.merge_destructively(self.result, result)
+ MultiInventory.merge_destructively(self.result, result)
+
+ def add_entry(self, data, keys, item):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ keys = a.b
+ item = c
+ '''
+ if "." in keys:
+ key, rest = keys.split(".", 1)
+ if key not in data:
+ data[key] = {}
+ self.add_entry(data[key], rest, item)
+ else:
+ data[keys] = item
+
+ def get_entry(self, data, keys):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ keys = a.b
+ return c
+ '''
+ if keys and "." in keys:
+ key, rest = keys.split(".", 1)
+ return self.get_entry(data[key], rest)
+ else:
+ return data.get(keys, None)
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
- if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
- return
-
- results = self.all_ec2_results[acc_config['name']]
- # Update each hostvar with the newly desired key: value
- for host_property, value in acc_config['hostvars'].items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+ results = self.all_inventory_results[acc_config['name']]
+ results['all_hosts'] = results['_meta']['hostvars'].keys()
+
+ # Update each hostvar with the newly desired key: value from extra_*
+ for _extra in ['extra_vars', 'extra_groups']:
+ for new_var, value in acc_config.get(_extra, {}).items():
for data in results['_meta']['hostvars'].values():
- data[str(host_property)] = str(value)
+ self.add_entry(data, new_var, value)
+
+ # Add this group
+ if _extra == 'extra_groups':
+ results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
+
+ # Clone groups goes here
+ for to_name, from_name in acc_config.get('clone_groups', {}).items():
+ if results.has_key(from_name):
+ results[to_name] = copy.copy(results[from_name])
- # Add this group
- if results.has_key(acc_config['all_group']):
- results["%s_%s" % (host_property, value)] = \
- copy.copy(results[acc_config['all_group']])
+ # Clone vars goes here
+ for to_name, from_name in acc_config.get('clone_vars', {}).items():
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, to_name, self.get_entry(data, from_name))
- # store the results back into all_ec2_results
- self.all_ec2_results[acc_config['name']] = results
+ # store the results back into all_inventory_results
+ self.all_inventory_results[acc_config['name']] = results
@staticmethod
def merge_destructively(input_a, input_b):
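The dotted-key helpers above let account-level extra_vars and clone_vars address nested hostvars such as oo.account without callers building the intermediate dictionaries themselves. A brief sketch of the helpers and the style of assignment apply_account_config performs (the variable names here are illustrative only):

    def add_entry(data, keys, item):
        """Set data['a']['b'] = item for keys 'a.b', creating levels as needed."""
        if '.' in keys:
            key, rest = keys.split('.', 1)
            data.setdefault(key, {})
            add_entry(data[key], rest, item)
        else:
            data[keys] = item

    def get_entry(data, keys):
        """Fetch a nested value for keys like 'a.b'; None when missing."""
        if keys and '.' in keys:
            key, rest = keys.split('.', 1)
            return get_entry(data[key], rest)
        return data.get(keys, None)

    hostvars = {}
    add_entry(hostvars, 'oo.account', 'aws1')    # extra_vars style assignment
    add_entry(hostvars, 'cloud', 'aws')
    add_entry(hostvars, 'oo.env', get_entry(hostvars, 'oo.account'))  # clone_vars style copy
    print(hostvars)  # {'oo': {'account': 'aws1', 'env': 'aws1'}, 'cloud': 'aws'}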
@@ -265,7 +317,7 @@ class MultiEc2(object):
for key in input_b:
if key in input_a:
if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
- MultiEc2.merge_destructively(input_a[key], input_b[key])
+ MultiInventory.merge_destructively(input_a[key], input_b[key])
elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
@@ -321,7 +373,7 @@ class MultiEc2(object):
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
- json_data = MultiEc2.json_format_dict(self.result, True)
+ json_data = MultiInventory.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
try:
fcntl.flock(cache, fcntl.LOCK_EX)
@@ -357,7 +409,7 @@ class MultiEc2(object):
if __name__ == "__main__":
- MEC2 = MultiEc2()
- MEC2.parse_cli_args()
- MEC2.run()
- print MEC2.result_str()
+ MI2 = MultiInventory()
+ MI2.parse_cli_args()
+ MI2.run()
+ print MI2.result_str()
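The renamed entry point keeps the same three-step flow as before; the snippet below drives it programmatically instead of via __main__. It assumes multi_inventory.py is importable and a valid multi_inventory.yaml is in place, so treat it as illustrative only:

    # Illustrative usage; assumes multi_inventory.py is on the Python path and
    # a valid multi_inventory.yaml exists where the script expects it.
    from multi_inventory import MultiInventory

    inventory = MultiInventory()
    inventory.parse_cli_args()     # same argument handling as the __main__ block
    inventory.run()                # read config, query providers or return the cache
    print(inventory.result_str())  # merged JSON across all configured accounts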
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
new file mode 100644
index 000000000..0f0788d18
--- /dev/null
+++ b/inventory/multi_inventory.yaml.example
@@ -0,0 +1,51 @@
+# multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+ - name: aws1
+ provider: aws/ec2.py
+ provider_files:
+ - name: ec2.ini
+ contents: |-
+ [ec2]
+ regions = all
+ regions_exclude = us-gov-west-1,cn-north-1
+ destination_variable = public_dns_name
+ route53 = False
+ cache_path = ~/.ansible/tmp
+ cache_max_age = 300
+ vpc_destination_variable = ip_address
+ env_vars:
+ AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+ AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ extra_vars:
+ cloud: aws
+ account: aws1
+
+- name: mygce
+ extra_vars:
+ cloud: gce
+ account: gce1
+ env_vars:
+ GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ provider: gce/gce.py
+ provider_files:
+ - name: priv_key.pem
+ contents: |-
+ -----BEGIN PRIVATE KEY-----
+ yourprivatekeydatahere
+ -----END PRIVATE KEY-----
+ - name: gce.ini
+ contents: |-
+ [gce]
+ gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+ gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ gce_project_id = gce-project
+ zone = us-central1-a
+ network = default
+ gce_machine_type = n1-standard-2
+ gce_machine_image = rhel7
+
+cache_max_age: 600
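Since the loader reads this file with yaml.safe_load and generate_config() expects every provider_files entry to carry name and contents keys, a quick pre-flight check of a candidate config can catch mistakes early. A hypothetical helper, not part of the shipped script:

    import yaml

    def check_config(path):
        """Hypothetical sanity check for a multi_inventory.yaml candidate."""
        with open(path) as conf:
            config = yaml.safe_load(conf)
        for acc in config.get('accounts', []):
            assert 'name' in acc and 'provider' in acc, 'accounts need name and provider'
            for pfile in acc.get('provider_files', []):
                assert {'name', 'contents'} <= set(pfile), \
                    '%s: provider_files entries need name and contents' % acc['name']
        return config

    # check_config('/etc/ansible/multi_inventory.yaml')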
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
deleted file mode 100644
index f163f865a..000000000
--- a/inventory/openshift-ansible-inventory.spec
+++ /dev/null
@@ -1,108 +0,0 @@
-Summary: OpenShift Ansible Inventories
-Name: openshift-ansible-inventory
-Version: 0.0.9
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}/usr/share/ansible/inventory
-mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
-mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
-
-cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
-cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
-
-%files
-%config(noreplace) /etc/ansible/*
-%dir /usr/share/ansible/inventory
-/usr/share/ansible/inventory/multi_ec2.py*
-/usr/share/ansible/inventory/aws/ec2.py*
-/usr/share/ansible/inventory/gce/gce.py*
-
-%changelog
-* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.9-1
-- Merge pull request #408 from sdodson/docker-buildvm (bleanhar@redhat.com)
-- Merge pull request #428 from jtslear/issue-383
- (twiest@users.noreply.github.com)
-- Merge pull request #407 from aveshagarwal/ae-ansible-merge-auth
- (bleanhar@redhat.com)
-- Enable htpasswd by default in the example hosts file. (avagarwa@redhat.com)
-- Add support for setting default node selector (jdetiber@redhat.com)
-- Merge pull request #429 from spinolacastro/custom_cors (bleanhar@redhat.com)
-- Updated to read config first and default to users home dir
- (kwoodson@redhat.com)
-- Fix Custom Cors (spinolacastro@gmail.com)
-- Revert "namespace the byo inventory so the group names aren't so generic"
- (sdodson@redhat.com)
-- Removes hardcoded python2 (jtslear@gmail.com)
-- namespace the byo inventory so the group names aren't so generic
- (admiller@redhat.com)
-- docker-buildvm-rhose is dead (sdodson@redhat.com)
-- Add support for setting routingConfig:subdomain (jdetiber@redhat.com)
-- Initial HA master (jdetiber@redhat.com)
-- Make it clear that the byo inventory file is just an example
- (jdetiber@redhat.com)
-- Playbook updates for clustered etcd (jdetiber@redhat.com)
-- Update for RC2 changes (sdodson@redhat.com)
-- Templatize configs and 0.5.2 changes (jdetiber@redhat.com)
-
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
-- Added more verbosity when error happens. Also fixed a bug.
- (kwoodson@redhat.com)
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * rename openshift_registry_url oreg_url * rename option_images to
- _{oreg|ortr}_images (jhonce@redhat.com)
-- Fix the remaining pylint warnings (lhuard@amadeus.com)
-- Fix some of the pylint warnings (lhuard@amadeus.com)
-- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com)
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
-- Making multi_ec2 into a library (kwoodson@redhat.com)
-
-* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- Added support for grouping and a bug fix. (kwoodson@redhat.com)
-
-* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
- not dictating what the ec2.ini file should look like. (twiest@redhat.com)
-- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- fixed build problems with openshift-ansible-inventory.spec
- (twiest@redhat.com)
-- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
-- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
-- Adding refresh-cache option and cleanup for pylint. Also updated for
- aws/hosts/ being added. (kwoodson@redhat.com)
-
-* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added the ability to have a config file in /etc/openshift_ansible to
- multi_ec2.py. (twiest@redhat.com)
-- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
-- gce inventory/playbook updates for node registration changes
- (jdetiber@redhat.com)
-- Various fixes (jdetiber@redhat.com)
-
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
new file mode 100644
index 000000000..21f624400
--- /dev/null
+++ b/openshift-ansible.spec
@@ -0,0 +1,542 @@
+# %commit is intended to be set by tito custom builders provided
+# in the .tito/lib directory. The values in this spec file will not be kept up to date.
+%{!?commit:
+%global commit c64d09e528ca433832c6b6e6f5c7734a9cc8ee6f
+}
+
+Name: openshift-ansible
+Version: 3.0.12
+Release: 1%{?dist}
+Summary: Openshift and Atomic Enterprise Ansible
+License: ASL 2.0
+URL: https://github.com/openshift/openshift-ansible
+Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
+BuildArch: noarch
+
+Requires: ansible
+
+%description
+Openshift and Atomic Enterprise Ansible
+
+This repo contains Ansible code and playbooks
+for Openshift and Atomic Enterprise.
+
+%prep
+%setup -q
+
+%build
+
+# atomic-openshift-utils install
+pushd utils
+%{__python} setup.py build
+popd
+
+%install
+# Base openshift-ansible install
+mkdir -p %{buildroot}%{_datadir}/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible_plugins
+
+# openshift-ansible-bin install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+cp -p bin/{ossh,oscp,opssh,opscp,ohi} %{buildroot}%{_bindir}
+cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
+cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+# Fix links
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
+ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
+ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
+
+# openshift-ansible-docs install
+# -docs are currently just %doc, no install needed
+
+# openshift-ansible-inventory install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
+cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
+cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
+cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
+
+# openshift-ansible-playbooks install
+cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
+
+# openshift-ansible-roles install
+cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
+
+# openshift-ansible-filter-plugins install
+cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
+# openshift-ansible-lookup-plugins install
+cp -rp lookup_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
+# atomic-openshift-utils install
+pushd utils
+%{__python} setup.py install --skip-build --root %{buildroot}
+# Remove this line once the name change has happened
+mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer
+mkdir -p %{buildroot}%{_datadir}/atomic-openshift-utils/
+cp etc/ansible.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible.cfg
+popd
+
+# Base openshift-ansible files
+%files
+%doc LICENSE.md README*
+%dir %{_datadir}/ansible/%{name}
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-bin subpackage
+# ----------------------------------------------------------------------------------
+%package bin
+Summary: Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
+Requires: %{name}-inventory
+Requires: python2
+BuildRequires: python2-devel
+BuildArch: noarch
+
+%description bin
+Scripts to make it nicer when working with hosts that are defined only by metadata.
+
+%files bin
+%{_bindir}/*
+%exclude %{_bindir}/atomic-openshift-installer
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-docs subpackage
+# ----------------------------------------------------------------------------------
+%package docs
+Summary: Openshift and Atomic Enterprise Ansible documents
+Requires: %{name}
+BuildArch: noarch
+
+%description docs
+%{summary}.
+
+%files docs
+%doc docs
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-inventory subpackage
+# ----------------------------------------------------------------------------------
+%package inventory
+Summary: Openshift and Atomic Enterprise Ansible Inventories
+Requires: python2
+BuildArch: noarch
+
+%description inventory
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%files inventory
+%config(noreplace) /etc/ansible/*
+%dir %{_datadir}/ansible/inventory
+%{_datadir}/ansible/inventory/multi_inventory.py*
+
+%package inventory-aws
+Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
+Requires: %{name}-inventory
+Requires: python-boto
+BuildArch: noarch
+
+%description inventory-aws
+Ansible Inventories for AWS used with the openshift-ansible scripts and playbooks.
+
+%files inventory-aws
+%{_datadir}/ansible/inventory/aws/ec2.py*
+
+%package inventory-gce
+Summary: Openshift and Atomic Enterprise Ansible Inventories for GCE
+Requires: %{name}-inventory
+Requires: python-libcloud >= 0.13
+BuildArch: noarch
+
+%description inventory-gce
+Ansible Inventories for GCE used with the openshift-ansible scripts and playbooks.
+
+%files inventory-gce
+%{_datadir}/ansible/inventory/gce/gce.py*
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-playbooks subpackage
+# ----------------------------------------------------------------------------------
+%package playbooks
+Summary: Openshift and Atomic Enterprise Ansible Playbooks
+Requires: %{name}
+Requires: %{name}-roles
+Requires: %{name}-lookup-plugins
+Requires: %{name}-filter-plugins
+BuildArch: noarch
+
+%description playbooks
+%{summary}.
+
+%files playbooks
+%{_datadir}/ansible/%{name}/playbooks
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-roles subpackage
+# ----------------------------------------------------------------------------------
+%package roles
+Summary: Openshift and Atomic Enterprise Ansible roles
+Requires: %{name}
+Requires: %{name}-lookup-plugins
+Requires: %{name}-filter-plugins
+BuildArch: noarch
+
+%description roles
+%{summary}.
+
+%files roles
+%{_datadir}/ansible/%{name}/roles
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-filter-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package filter-plugins
+Summary: Openshift and Atomic Enterprise Ansible filter plugins
+Requires: %{name}
+BuildArch: noarch
+
+%description filter-plugins
+%{summary}.
+
+%files filter-plugins
+%{_datadir}/ansible_plugins/filter_plugins
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-lookup-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package lookup-plugins
+Summary: Openshift and Atomic Enterprise Ansible lookup plugins
+Requires: %{name}
+BuildArch: noarch
+
+%description lookup-plugins
+%{summary}.
+
+%files lookup-plugins
+%{_datadir}/ansible_plugins/lookup_plugins
+
+# ----------------------------------------------------------------------------------
+# atomic-openshift-utils subpackage
+# ----------------------------------------------------------------------------------
+
+%package -n atomic-openshift-utils
+Summary: Atomic OpenShift Utilities
+BuildRequires: python-setuptools
+Requires: openshift-ansible-playbooks
+Requires: openshift-ansible-roles
+Requires: ansible
+Requires: python-click
+Requires: python-setuptools
+Requires: PyYAML
+BuildArch: noarch
+
+%description -n atomic-openshift-utils
+Atomic OpenShift Utilities includes
+ - atomic-openshift-installer
+ - other utilities
+
+%files -n atomic-openshift-utils
+%{python_sitelib}/ooinstall*
+%{_bindir}/atomic-openshift-installer
+%{_datadir}/atomic-openshift-utils/ansible.cfg
+
+
+%changelog
+* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.12-1
+- Sync with the latest image streams (sdodson@redhat.com)
+
+* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.11-1
+- Migrate xpaas content from pre v1.1.0 (sdodson@redhat.com)
+- Import latest xpaas templates and image streams (sdodson@redhat.com)
+
+* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.10-1
+- Fix update error for templates that didn't previously exist
+ (jdetiber@redhat.com)
+- General cleanup of v3_0_to_v3_1/upgrade.yml (jdetiber@redhat.com)
+- Add zabbix pieces to hold AWS S3 bucket stats (jdiaz@redhat.com)
+- add ansible dep to vagrant doc (jdetiber@redhat.com)
+- oo_filter: don't fail when attribute is not defined (tob@butter.sh)
+
+* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.9-1
+- Refactor upgrade playbook(s) (jdetiber@redhat.com)
+
+* Tue Nov 10 2015 Scott Dodson <sdodson@redhat.com> 3.0.8-1
+- Add origin-clients to uninstall playbook. (abutcher@redhat.com)
+- examples: include logging and metrics infrastructure (lmeyer@redhat.com)
+- Add separate step to enable services during upgrade. (dgoodwin@redhat.com)
+- Update tests now that cli is not asking for rpm/container install
+ (smunilla@redhat.com)
+- atomic-openshift-installer: Remove question for container install
+ (smunilla@redhat.com)
+- Remove references to multi_ec2.py (jdetiber@redhat.com)
+- 1279746: Fix leftover disabled features line in config template.
+ (dgoodwin@redhat.com)
+- 1279734: Ensure services are enabled after upgrade. (dgoodwin@redhat.com)
+- Fix missing etcd_data_dir bug. (dgoodwin@redhat.com)
+- Package the default ansible.cfg with atomic-openshift-utils.
+ (dgoodwin@redhat.com)
+- Add ldap auth identity provider to example inventory. (abutcher@redhat.com)
+- Read etcd data dir from appropriate config file. (dgoodwin@redhat.com)
+- atomic-openshift-installer: Generate inventory off hosts_to_run_on
+ (smunilla@redhat.com)
+- Various fixes related to connect_to (bleanhar@redhat.com)
+- Remove upgrade playbook restriction on 3.0.2. (dgoodwin@redhat.com)
+- Conditionals for flannel etcd client certs. (abutcher@redhat.com)
+- New `iptablesSyncPeriod` field in node configuration (abutcher@redhat.com)
+- Fix indentation on when (jdetiber@redhat.com)
+- Bug 1278863 - Error using openshift_pkg_version (jdetiber@redhat.com)
+- more cleanup of names (mwoodson@redhat.com)
+- Missing conditionals for api/controller sysconfig. (abutcher@redhat.com)
+- Updating the atomic-openshift-isntaller local connection logic for the
+ connect_to addition. (bleanhar@redhat.com)
+- cleaned up network checks (mwoodson@redhat.com)
+- Minor upgrade improvements. (dgoodwin@redhat.com)
+- Wait for cluster to recover after pcs resource restart. (abutcher@redhat.com)
+- Bug 1278245 - Failed to add node to existing env using atomic-openshift-
+ installer (bleanhar@redhat.com)
+- remove debug statement (jdetiber@redhat.com)
+- Fix removal of kubernetesMasterConfig.apiLevels (jdetiber@redhat.com)
+- atomic-openshift-installer: Better specification of ansible connection point
+ (smunilla@redhat.com)
+- Fix issues related to upgrade packages being unavailable
+ (jdetiber@redhat.com)
+- added network checks. also updated item prototype code to support more
+ (mwoodson@redhat.com)
+- Fix data_dir for 3.0 deployments (jdetiber@redhat.com)
+- Fix apiLevels modifications (jdetiber@redhat.com)
+- Fix creation of origin symlink when dir already exists. (dgoodwin@redhat.com)
+- apiLevel changes (jdetiber@redhat.com)
+- Write new config to disk after successful upgrade. (dgoodwin@redhat.com)
+- Fix pylint errors with getting hosts to run on. (dgoodwin@redhat.com)
+- Remove v1beta3 by default for kube_nfs_volumes (jdetiber@redhat.com)
+- Add pre-upgrade script to be run on first master. (dgoodwin@redhat.com)
+- Start to handle pacemaker ha during upgrade (abutcher@redhat.com)
+- Fix lb group related errors (jdetiber@redhat.com)
+- Fix file check conditional. (abutcher@redhat.com)
+- Don't check for certs in data_dir just raise when they can't be found. Fix
+ typo. (abutcher@redhat.com)
+- exclude atomic-openshift-installer from bin subpackage (tdawson@redhat.com)
+- add master_hostnames definition for upgrade (jdetiber@redhat.com)
+- Additional upgrade enhancements (jdetiber@redhat.com)
+- Handle backups for separate etcd hosts if necessary. (dgoodwin@redhat.com)
+- Further upgrade improvements (jdetiber@redhat.com)
+- Upgrade improvements (dgoodwin@redhat.com)
+- Bug 1278243 - Confusing prompt from atomic-openshift-installer
+ (bleanhar@redhat.com)
+- Bug 1278244 - Previously there was no way to add a node in unattended mode
+ (bleanhar@redhat.com)
+- Revert to defaults (abutcher@redhat.com)
+- Bug 1278244 - Incorrect node information gathered by atomic-openshift-
+ installer (bleanhar@redhat.com)
+- atomic-openshift-installer's unattended mode wasn't work with --force for all
+ cases (bleanhar@redhat.com)
+- Making it easier to use pre-release content (bleanhar@redhat.com)
+- The uninstall playbook needs to remove /run/openshift-sdn
+ (bleanhar@redhat.com)
+- Various HA changes for pacemaker and native methods. (abutcher@redhat.com)
+- Bug 1274201 - Fixing non-root installations if using a local connection
+ (bleanhar@redhat.com)
+- Bug 1274201 - Fixing sudo non-interactive test (bleanhar@redhat.com)
+- Bug 1277592 - SDN MTU has hardcoded default (jdetiber@redhat.com)
+- Atomic Enterprise/OpenShift Enterprise merge update (jdetiber@redhat.com)
+- fix dueling controllers - without controllerLeaseTTL set in config, multiple
+ controllers will attempt to start (jdetiber@redhat.com)
+- default to source persistence for haproxy (jdetiber@redhat.com)
+- hardcode openshift binaries for now (jdetiber@redhat.com)
+- more tweaks (jdetiber@redhat.com)
+- more tweaks (jdetiber@redhat.com)
+- additional ha related updates (jdetiber@redhat.com)
+- additional native ha changes (abutcher@redhat.com)
+- Start of true master ha (jdetiber@redhat.com)
+- Atomic Enterprise related changes. (avagarwa@redhat.com)
+- Remove pacemaker bits. (abutcher@redhat.com)
+- Override hosts deployment_type fact for version we're upgrading to.
+ (dgoodwin@redhat.com)
+- Pylint fixes for config upgrade module. (dgoodwin@redhat.com)
+- Disable proxy cert config upgrade until certs being generated.
+ (dgoodwin@redhat.com)
+- remove debug line (florian.lambert@enovance.com)
+- [roles/openshift_master_certificates/tasks/main.yml] Fix variable
+ openshift.master.all_hostnames to openshift.common.all_hostnames
+ (florian.lambert@enovance.com)
+- Fix bug with not upgrading openshift-master to atomic-openshift-master.
+ (dgoodwin@redhat.com)
+- Adding aws and gce packages to ansible-inventory (kwoodson@redhat.com)
+- Fix subpackage dependencies (jdetiber@redhat.com)
+- Refactor common group evaluation to avoid duplication (jdetiber@redhat.com)
+- common/openshift-cluster: Scaleup playbook (smunilla@redhat.com)
+- Fix bug from module rename. (dgoodwin@redhat.com)
+- Fix bug with default ansible playbook dir. (dgoodwin@redhat.com)
+- Use the base package upgrade version so we can check things earlier.
+ (dgoodwin@redhat.com)
+- Skip fail if enterprise deployment type depending on version.
+ (dgoodwin@redhat.com)
+- Add debug output for location of etcd backup. (dgoodwin@redhat.com)
+- Filter internal hostnames from the list of parsed names.
+ (abutcher@redhat.com)
+- Move config upgrade to correct place, fix node facts. (dgoodwin@redhat.com)
+- Add custom certificates to serving info in master configuration.
+ (abutcher@redhat.com)
+- Add in proxyClientInfo if missing during config upgrade.
+ (dgoodwin@redhat.com)
+- Implement master-config.yaml upgrade for v1beta3 apiLevel removal.
+ (dgoodwin@redhat.com)
+- Fix installer upgrade bug following pylint fix. (dgoodwin@redhat.com)
+- Document the new version field for installer config. (dgoodwin@redhat.com)
+- Remove my username from some test data. (dgoodwin@redhat.com)
+- Add a simple version for the installer config file. (dgoodwin@redhat.com)
+- Pylint fix. (dgoodwin@redhat.com)
+- Fix issue with master.proxy-client.{crt,key} and omit. (abutcher@redhat.com)
+- initial module framework (jdetiber@redhat.com)
+- Better info prior to initiating upgrade. (dgoodwin@redhat.com)
+- Fix etcd backup bug with not-yet-created /var/lib/origin symlink
+ (dgoodwin@redhat.com)
+- Print info after upgrade completes. (dgoodwin@redhat.com)
+- Automatically upgrade legacy config files. (dgoodwin@redhat.com)
+- Remove devel fail and let upgrade proceed. (dgoodwin@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+ (dgoodwin@redhat.com)
+- Generate timestamped etcd backups. (dgoodwin@redhat.com)
+- Add etcd_data_dir fact. (dgoodwin@redhat.com)
+- Functional disk space checking for etcd backup. (dgoodwin@redhat.com)
+- First cut at checking available disk space for etcd backup.
+ (dgoodwin@redhat.com)
+- Block upgrade if targetting enterprise deployment type. (dgoodwin@redhat.com)
+- Change flannel registration default values (sbaubeau@redhat.com)
+- Remove empty notify section (sbaubeau@redhat.com)
+- Check etcd certs exist for flannel when its support is enabled
+ (sbaubeau@redhat.com)
+- Fix when neither use_openshift_sdn nor use_flannel are specified
+ (sbaubeau@redhat.com)
+- Generate etcd certificats for flannel when is not embedded
+ (sbaubeau@redhat.com)
+- Add missing 2nd true parameters to default Jinja filter (sbaubeau@redhat.com)
+- Use 'command' module instead of 'shell' (sbaubeau@redhat.com)
+- Add flannel modules documentation (sbaubeau@redhat.com)
+- Only remove IPv4 address from docker bridge (sbaubeau@redhat.com)
+- Remove multiple use_flannel fact definition (sbaubeau@redhat.com)
+- Ensure openshift-sdn and flannel can't be used at the same time
+ (sbaubeau@redhat.com)
+- Add flannel support (sbaubeau@redhat.com)
+
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+ (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+ (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+ (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+ warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+ (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+ (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+ (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+ (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
+* Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
+- Adding python-boto and python-libcloud to openshift-ansible-inventory
+ dependency (kwoodson@redhat.com)
+- Use more specific enterprise version for version_greater_than_3_1_or_1_1.
+ (abutcher@redhat.com)
+- Conditionalizing the support for the v1beta3 api (bleanhar@redhat.com)
+
+* Thu Oct 29 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.5-1
+- Updating multi_ec2 to support extra_vars and extra_groups
+ (kwoodson@redhat.com)
+- Removing the template and doing to_nice_yaml instead (kwoodson@redhat.com)
+- README_AEP.md: update instructions for creating router and registry
+ (jlebon@redhat.com)
+- README_AEP: Various fixes (walters@verbum.org)
+- Fixing for extra_vars rename. (kwoodson@redhat.com)
+- make storage_plugin_deps conditional on deployment_type (jdetiber@redhat.com)
+- remove debugging pauses (jdetiber@redhat.com)
+- make storage plugin dependency installation more flexible
+ (jdetiber@redhat.com)
+- Install storage plugin dependencies (jdetiber@redhat.com)
+
+* Wed Oct 28 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.4-1
+- Removing spec files. (kwoodson@redhat.com)
+- Updated example (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-inventory] release [0.0.11-1].
+ (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-bin] release [0.0.21-1].
+ (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-inventory] release [0.0.10-1].
+ (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-bin] release [0.0.20-1].
+ (kwoodson@redhat.com)
+- Adding tito releasers configuration (bleanhar@redhat.com)
+- Bug fixes for the uninstall playbook (bleanhar@redhat.com)
+- Adding clone vars and groups. Renamed hostvars to extra_vars.
+ (kwoodson@redhat.com)
+- Start tracking docker info execution time (jdiaz@redhat.com)
+- The uninstall playbook should remove the kubeconfig for non-root installs
+ (bleanhar@redhat.com)
+- Adding uninstall support for Atomic Host (bleanhar@redhat.com)
+- add examples for SDN configuration (jdetiber@redhat.com)
+
+* Tue Oct 27 2015 Troy Dawson <tdawson@redhat.com> 3.0.3-1
+- Pylint fixes and ignores for incoming oo-install code. (dgoodwin@redhat.com)
+- Pylint fixes (abutcher@redhat.com)
+- Adding zabbix type and fixing zabbix agent vars (kwoodson@redhat.com)
+- Add atomic-openshift-utils add atomic-openshift-utils to openshift-
+ ansible.spec file (tdawson@redhat.com)
+- Fix quotes (spinolacastro@gmail.com)
+- Use standard library for version comparison. (abutcher@redhat.com)
+- added docker info to the end of docker loop to direct lvm playbook.
+ (twiest@redhat.com)
+- Add missing quotes (spinolacastro@gmail.com)
+- Adding Docker Log Options capabilities (epo@jemba.net)
+- Move version greater_than_fact into openshift_facts (abutcher@redhat.com)
+- Don't include proxy client cert when <3.1 or <1.1 (abutcher@redhat.com)
+- Add proxy client certs to master config. (abutcher@redhat.com)
+- Update imagestreams and quickstarts from origin (sdodson@redhat.com)
+- Get default values from openshift_facts (spinolacastro@gmail.com)
+- Cleanup (spinolacastro@gmail.com)
+- Add missing inventory example (spinolacastro@gmail.com)
+- Custom Project Config (spinolacastro@gmail.com)
+
+* Mon Oct 19 2015 Troy Dawson <tdawson@redhat.com> 3.0.2-1
+- Initial Package
+
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index 614b2537a..72fcd77b3 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -97,8 +97,19 @@
- debug: var=setup_output
+ - name: extend the vg
+ command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+ register: extend_output
+
+ - debug: var=extend_output
+
- name: start docker
- command: systemctl start docker.service
- register: dockerstart
+ service:
+ name: docker
+ state: restarted
+
+ - name: docker info
+ command: docker info
+ register: dockerinfo
- - debug: var=dockerstart
+ - debug: var=dockerinfo
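The added tasks extend the docker-pool logical volume to 90% of its volume group and then bounce docker through the service module rather than a raw systemctl command. The same sequence, sketched as a standalone Python helper purely for illustration (paths and sizes mirror the playbook, not a general-purpose tool):

    import subprocess

    def extend_pool_and_restart_docker():
        """Mirror of the playbook's new steps; run as root on the target host."""
        # extend the vg: grow docker-pool to 90% of the docker_vg volume group
        subprocess.check_call(['lvextend', '-l', '90%VG', '/dev/docker_vg/docker-pool'])
        # equivalent of `service: name=docker state=restarted`
        subprocess.check_call(['systemctl', 'restart', 'docker.service'])
        # docker info, registered for later inspection in the playbook
        return subprocess.check_output(['docker', 'info'])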
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 40db668da..e0dbad900 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -1,6 +1,6 @@
# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
# Enterprise content installed by ansible. This includes:
-#
+#
# configuration
# containers
# example templates and imagestreams
@@ -13,6 +13,20 @@
sudo: yes
tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
+
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
- service: name={{ item }} state=stopped
with_items:
- atomic-enterprise-master
@@ -31,8 +45,10 @@
- origin-master-api
- origin-master-controllers
- origin-node
+ - pcsd
- yum: name={{ item }} state=absent
+ when: not is_atomic | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
@@ -43,6 +59,7 @@
- atomic-openshift-master
- atomic-openshift-node
- atomic-openshift-sdn-ovs
+ - corosync
- etcd
- openshift
- openshift-master
@@ -51,14 +68,26 @@
- openshift-sdn-ovs
- openvswitch
- origin
+ - origin-clients
- origin-master
- origin-node
- origin-sdn-ovs
+ - pacemaker
+ - pcs
- tuned-profiles-atomic-enterprise-node
- tuned-profiles-atomic-openshift-node
- tuned-profiles-openshift-node
- tuned-profiles-origin-node
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+
- shell: systemctl reset-failed
changed_when: False
@@ -112,8 +141,10 @@
- file: path={{ item }} state=absent
with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
+ - /etc/corosync
- /etc/etcd
- /etc/openshift
- /etc/openshift-sdn
@@ -127,8 +158,13 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-node
- /root/.kube
+ - /run/openshift-sdn
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/etcd
- /var/lib/openshift
- /var/lib/origin
+ - /var/lib/pacemaker
+
+ - name: restart docker
+ service: name=docker state=restarted
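The uninstall playbook now skips the yum removal step on Atomic Host, which it detects by the presence of /run/ostree-booted. An equivalent check written as a small Python snippet, only to illustrate the condition the playbook registers as is_atomic:

    import os

    def is_atomic_host():
        """True when the host is ostree-based, mirroring `ls /run/ostree-booted`."""
        return os.path.exists('/run/ostree-booted')

    # The playbook only runs the `yum: state=absent` task when this is False,
    # since rpm-ostree systems do not manage these packages with yum.
    print(is_atomic_host())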
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
deleted file mode 120000
index 73cafffe5..000000000
--- a/playbooks/adhoc/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/adhoc/upgrades/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index a8e3e27bb..5aa6b0f9b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -11,6 +11,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 786918929..09bf34666 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -11,7 +11,7 @@
msg: Deployment type not supported for aws provider yet
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -19,7 +19,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -27,7 +27,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -38,7 +38,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 9e50a4a18..411c7e660 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -4,6 +4,7 @@
g_etcd_group: "{{ 'etcd' }}"
g_masters_group: "{{ 'masters' }}"
g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
new file mode 100644
index 000000000..ce7aebf8e
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -0,0 +1,8 @@
+# Upgrade playbooks
+The playbooks provided in this directory can be used for upgrading an existing
+environment. Additional notes for the associated upgrade playbooks are
+provided in their respective directories.
+
+# Upgrades available
+- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md)
+- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md)
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
index 6de8a970f..c91a6cb96 100644
--- a/playbooks/adhoc/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
@@ -1,11 +1,11 @@
-# [NOTE]
-This playbook will re-run installation steps overwriting any local
+# v3.0 minor upgrade playbook
+**Note:** This playbook will re-run installation steps overwriting any local
modifications. You should ensure that your inventory has been updated with any
modifications you've made after your initial installation. If you find any items
that cannot be configured via ansible please open an issue at
https://github.com/openshift/openshift-ansible
-# Overview
+## Overview
This playbook is available as a technical preview. It currently performs the
following steps.
@@ -17,5 +17,5 @@ following steps.
* Updates the default registry if one exists
* Updates image streams and quickstarts
-# Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
new file mode 100644
index 000000000..76fa9ba22
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
new file mode 100644
index 000000000..c434be5b7
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
@@ -0,0 +1,17 @@
+# v3.0 to v3.1 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+**TODO: update for current steps**
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..b06442366
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4c74f96db..a8bd634d3 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,68 +1,5 @@
---
-- name: Populate config host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - fail:
- msg: This playbook rquires g_etcd_group to be set
- when: g_etcd_group is not defined
-
- - fail:
- msg: This playbook rquires g_masters_group to be set
- when: g_masters_group is not defined
-
- - fail:
- msg: This playbook rquires g_nodes_group to be set
- when: g_nodes_group is not defined
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_etcd_group] | default([])
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ groups[g_etcd_group][0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups[g_masters_group][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: evaluate_groups.yml
- include: ../openshift-etcd/config.yml
@@ -71,4 +8,4 @@
- include: ../openshift-node/config.yml
vars:
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
new file mode 100644
index 000000000..2bb69614f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -0,0 +1,76 @@
+---
+- name: Populate config host groups
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: This playbook requires g_etcd_group to be set
+ when: g_etcd_group is not defined
+
+ - fail:
+ msg: This playbook requires g_masters_group to be set
+ when: g_masters_group is not defined
+
+ - fail:
+ msg: This playbook requires g_nodes_group to be set
+ when: g_nodes_group is not defined
+
+ - fail:
+ msg: This playbook requires g_lb_group to be set
+ when: g_lb_group is not defined
+
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_etcd_group] | default([])
+
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_nodes_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+ when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ groups[g_etcd_group][0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups[g_masters_group][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+ - name: Evaluate oo_lb_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_lb_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_lb_group] | default([])
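evaluate_groups.yml centralises the mapping from the caller-supplied g_* group names to the oo_* runtime groups, with oo_first_etcd and oo_first_master pinned to the first host of each group. A rough Python model of that mapping, useful only for reasoning about the result (Ansible itself runs the add_host tasks above, not this):

    def evaluate_groups(inventory_groups, g_etcd='etcd', g_masters='masters',
                        g_nodes='nodes', g_lb='lb', nodeonmaster=False):
        """Model the add_host logic; inventory_groups maps group name -> host list."""
        oo = {
            'oo_etcd_to_config': list(inventory_groups.get(g_etcd, [])),
            'oo_masters_to_config': list(inventory_groups.get(g_masters, [])),
            'oo_nodes_to_config': list(inventory_groups.get(g_nodes, [])),
            'oo_lb_to_config': list(inventory_groups.get(g_lb, [])),
        }
        if nodeonmaster:
            # masters are also configured as nodes when g_nodeonmaster is true
            oo['oo_nodes_to_config'] += inventory_groups.get(g_masters, [])
        if inventory_groups.get(g_etcd):
            oo['oo_first_etcd'] = [inventory_groups[g_etcd][0]]
        if inventory_groups.get(g_masters):
            oo['oo_first_master'] = [inventory_groups[g_masters][0]]
        return oo

    print(evaluate_groups({'etcd': ['e1'], 'masters': ['m1', 'm2'],
                           'nodes': ['n1'], 'lb': ['l1']}))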
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..6d2777732
--- /dev/null
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -0,0 +1,16 @@
+---
+- include: evaluate_groups.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- include: ../openshift-node/config.yml
+ vars:
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..1a6580795 100644
--- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
new file mode 100644
index 000000000..ed4ab6d1b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+"""
+Pre-upgrade checks that must be run on a master before proceeding with upgrade.
+"""
+# This is a script not a python module:
+# pylint: disable=invalid-name
+
+# NOTE: This script should not require any python libs other than what is
+# in the standard library.
+
+__license__ = "ASL 2.0"
+
+import json
+import os
+import subprocess
+import re
+
+# The maximum length of container.ports.name
+ALLOWED_LENGTH = 15
+# The valid structure of container.ports.name
+ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
+AT_LEAST_ONE_LETTER = re.compile('[a-z]')
+# Look at OC_PATH for the full path. Defaults to 'oc'
+OC_PATH = os.getenv('OC_PATH', 'oc')
+
+
+def validate(value):
+ """
+ validate verifies that value matches required conventions
+
+ Rules of container.ports.name validation:
+
+    * must be less than 16 chars
+ * at least one letter
+ * only a-z0-9-
+    * hyphens cannot be leading or trailing or next to each other
+
+ :Parameters:
+ - `value`: Value to validate
+ """
+ if len(value) > ALLOWED_LENGTH:
+ return False
+
+ if '--' in value:
+ return False
+
+ # We search since it can be anywhere
+ if not AT_LEAST_ONE_LETTER.search(value):
+ return False
+
+ # We match because it must start at the beginning
+ if not ALLOWED_CHARS.match(value):
+ return False
+ return True
+
+
+def list_items(kind):
+ """
+ list_items returns a list of items from the api
+
+ :Parameters:
+ - `kind`: Kind of item to access
+ """
+ response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
+ items = json.loads(response)
+ return items.get("items", [])
+
+
+def get(obj, *paths):
+ """
+    Walks a nested dictionary along the given keys, returning [] if any key is missing
+
+    :Parameters:
+    - `obj`: A dictionary structure
+    - `paths`: Keys to follow, given as the remaining positional arguments
+ """
+ ret_obj = obj
+ for path in paths:
+ if ret_obj.get(path, None) is None:
+ return []
+ ret_obj = ret_obj[path]
+ return ret_obj
+
+
+# pylint: disable=too-many-arguments
+def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
+ """
+    Prints out results in a human-friendly way.
+
+ :Parameters:
+ - `namespace`: Namespace of the resource
+ - `kind`: Kind of the resource
+ - `item_name`: Name of the resource
+ - `container_name`: Name of the container. May be "" when kind=Service.
+ - `port_name`: Name of the port
+ - `valid`: True if the port is valid
+ """
+ if not valid:
+ if len(container_name) > 0:
+ print('%s/%s -n %s (Container="%s" Port="%s")' % (
+ kind, item_name, namespace, container_name, port_name))
+ else:
+ print('%s/%s -n %s (Port="%s")' % (
+ kind, item_name, namespace, port_name))
+
+
+def print_validation_header():
+ """
+ Prints the error header. Should run on the first error to avoid
+ overwhelming the user.
+ """
+ print """\
+At least one port name does not validate. Valid port names:
+
+ * must be less than 16 chars
+ * have at least one letter
+ * only a-z0-9-
+ * do not start or end with -
+ * dashes may not be next to each other ('--')
+"""
+
+
+def main():
+ """
+ main is the main entry point to this script
+ """
+ try:
+ # the comma at the end suppresses the newline
+ print "Checking for oc ...",
+ subprocess.check_output([OC_PATH, 'whoami'])
+ print "found"
+ except:
+ print(
+ 'Unable to run "%s whoami"\n'
+ 'Please ensure OpenShift is running, and "oc" is on your system '
+ 'path.\n'
+ 'You can override the path with the OC_PATH environment variable.'
+ % OC_PATH)
+ raise SystemExit(1)
+
+ # Where the magic happens
+ first_error = True
+ for kind, path in [
+ ('replicationcontrollers', ("spec", "template", "spec", "containers")),
+ ('pods', ("spec", "containers")),
+ ('deploymentconfigs', ("spec", "template", "spec", "containers"))]:
+ for item in list_items(kind):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for container in get(item, *path):
+ container_name = container["name"]
+ for port in get(container, "ports"):
+ port_name = port.get("name", None)
+ if not port_name:
+ # Unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, kind, item_name,
+ container_name, port_name, valid)
+
+ # Services follow a different flow
+ for item in list_items('services'):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for port in get(item, "spec", "ports"):
+ port_name = port.get("targetPort", None)
+ if isinstance(port_name, int) or port_name is None:
+                # Integer or unnamed targetPorts are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, "services", item_name, "", port_name, valid)
+
+ # If we had at least 1 error then exit with 1
+ if not first_error:
+ raise SystemExit(1)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
new file mode 100644
index 000000000..f90719cab
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+
+yum_available=$(yum list available "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+
+
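+# The YAML printed below is parsed with `from_yaml` by the upgrade playbooks;
+# curr_version and avail_version are space-separated package version strings.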
+echo "---"
+echo "curr_version: ${yum_installed}"
+echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
new file mode 120000
index 000000000..b1213dedb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
new file mode 100755
index 000000000..a6721bb92
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+"""Ansible module for modifying OpenShift configs during an upgrade"""
+
+import os
+import yaml
+
+DOCUMENTATION = '''
+---
+module: openshift_upgrade_config
+short_description: OpenShift Upgrade Config
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
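+# A minimal sketch of invoking this module from a play; the config_base value
+# below is an assumed path, not taken from the source:
+- openshift_upgrade_config:
+    config_base: /etc/origin
+    from_version: '3.0'
+    to_version: '3.1'
+    role: master
+    backup: true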
+'''
+
+def modify_api_levels(level_list, remove, ensure, msg_prepend='',
+ msg_append=''):
+ """ modify_api_levels """
+ changed = False
+ changes = []
+
+ if not isinstance(remove, list):
+ remove = []
+
+ if not isinstance(ensure, list):
+ ensure = []
+
+ if not isinstance(level_list, list):
+ new_list = []
+ changed = True
+ changes.append("%s created missing %s" % (msg_prepend, msg_append))
+ else:
+ new_list = level_list
+ for level in remove:
+ if level in new_list:
+ new_list.remove(level)
+ changed = True
+ changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
+
+ for level in ensure:
+ if level not in new_list:
+ new_list.append(level)
+ changed = True
+ changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
+
+ return {'new_list': new_list, 'changed': changed, 'changes': changes}
+
+
+def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
+ """Main upgrade method for 3.0 to 3.1."""
+ changes = []
+
+ # Facts do not get transferred to the hosts where custom modules run,
+ # need to make some assumptions here.
+ master_config = os.path.join(config_base, 'master/master-config.yaml')
+
+ master_cfg_file = open(master_config, 'r')
+ config = yaml.safe_load(master_cfg_file.read())
+ master_cfg_file.close()
+
+
+ # Remove unsupported api versions and ensure supported api versions from
+ # master config
+ unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
+ supported_levels = ['v1']
+
+ result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
+ supported_levels, 'master-config.yaml:', 'from apiLevels')
+ if result['changed']:
+ config['apiLevels'] = result['new_list']
+ changes.append(result['changes'])
+
+ if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig'].pop('apiLevels')
+ changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
+
+ # Add proxyClientInfo to master-config
+ if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig']['proxyClientInfo'] = {
+ 'certFile': 'master.proxy-client.crt',
+ 'keyFile': 'master.proxy-client.key'
+ }
+ changes.append("master-config.yaml: added proxyClientInfo")
+
+ if len(changes) > 0:
+ if backup:
+ # TODO: Check success:
+ ansible_module.backup_local(master_config)
+
+ # Write the modified config:
+ out_file = open(master_config, 'w')
+ out_file.write(yaml.safe_dump(config, default_flow_style=False))
+ out_file.close()
+
+ return changes
+
+
+def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
+ """Upgrade entry point."""
+ if from_version == '3.0':
+ if to_version == '3.1':
+ return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
+
+
+def main():
+ """ main """
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ config_base=dict(required=True),
+ from_version=dict(required=True, choices=['3.0']),
+ to_version=dict(required=True, choices=['3.1']),
+ role=dict(required=True, choices=['master']),
+ backup=dict(required=False, default=True, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ from_version = module.params['from_version']
+ to_version = module.params['to_version']
+ role = module.params['role']
+ backup = module.params['backup']
+ config_base = module.params['config_base']
+
+ try:
+ changes = []
+ if role == 'master':
+ changes = upgrade_master(module, config_base, from_version,
+ to_version, backup)
+
+ changed = len(changes) > 0
+ return module.exit_json(changed=changed, changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception, e:
+ return module.fail_json(msg=str(e))
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
new file mode 120000
index 000000000..aff753026
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/roles b/playbooks/common/openshift-cluster/upgrades/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/roles
@@ -0,0 +1 @@
+../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index 56a1df860..9f7e49b93 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -1,15 +1,12 @@
---
+- name: Evaluate groups
+ include: ../../evaluate_groups.yml
+
- name: Re-Run cluster configuration to apply latest configuration changes
- include: ../../common/openshift-cluster/config.yml
- vars:
- g_etcd_group: "{{ 'etcd' }}"
- g_masters_group: "{{ 'masters' }}"
- g_nodes_group: "{{ 'nodes' }}"
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
+ include: ../../config.yml
- name: Upgrade masters
- hosts: masters
+ hosts: oo_masters_to_config
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
@@ -19,7 +16,7 @@
service: name="{{ openshift.common.service_type}}-master" state=restarted
- name: Upgrade nodes
- hosts: nodes
+ hosts: oo_nodes_to_config
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
@@ -50,19 +47,6 @@
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --confirm
-- name: Update cluster policy bindings
- hosts: oo_first_master
- tasks:
- - name: oadm policy reconcile-cluster-role-bindings --confirm
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-role-bindings
- --exclude-groups=system:authenticated
- --exclude-groups=system:unauthenticated
- --exclude-users=system:anonymous
- --additive-only=true --confirm
- when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
-
- name: Upgrade default router
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..78797f8b8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,433 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Evaluate host groups
+ include: ../../evaluate_groups.yml
+
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Evaluate etcd_hosts_to_backup
+ hosts: localhost
+ tasks:
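+  # Back up the dedicated etcd hosts when an etcd group was configured;
+  # otherwise the first master holds the embedded etcd data.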
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ gather_facts: no
+ tasks:
+ # Pacemaker is currently the only supported upgrade path for multiple masters
+ - fail:
+ msg: "openshift_master_cluster_method must be set to 'pacemaker'"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
+
+ - fail:
+ msg: >
+ This upgrade is only supported for origin and openshift-enterprise
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a 3.1 upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare('3.0.2.900','<')
+
+ # If this script errors out ansible will show the default stdout/stderr
+ # which contains details for the user:
+ - script: ../files/pre-upgrade-check
+
+
+- name: Verify upgrade can proceed
+ hosts: masters:nodes
+ tasks:
+ - name: Clean yum cache
+ command: yum clean all
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/versions.sh {{ g_new_service_name }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
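+  # Use the newest available package version when one exists; otherwise fall
+  # back to the installed version, keeping only the portion before the first '-'.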
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.0.6 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
+
+ - fail:
+ msg: Atomic OpenShift 3.1 packages not found
+ when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
+
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+  # We assume the etcd data dir is used for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+        {{ etcd_disk_usage.stdout }} KB disk space required for etcd backup,
+        {{ avail_disk.stdout }} KB available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ yum:
+ pkg: etcd
+ state: latest
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+
+- name: Update deployment type
+ hosts: OSEv3
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade to latest available kernel
+ yum:
+ pkg: kernel
+ state: latest
+
+ - name: Upgrade master packages
+ command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
+
+ - name: Ensure python-yaml present for config upgrade
+ yum:
+ pkg: PyYAML
+ state: installed
+
+ - name: Upgrade master configuration
+ openshift_upgrade_config:
+ from_version: '3.0'
+ to_version: '3.1'
+ role: master
+ config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+ - set_fact:
+ master_certs_missing: True
+ master_cert_subdir: master-{{ openshift.common.hostname }}
+ master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+
+
+- name: Generate missing master certificates
+ hosts: oo_first_master
+ vars:
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
+ master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
+ masters_needing_certs: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | difference([groups.oo_first_master.0]) }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_master_certificates
+ post_tasks:
+ - name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: absent
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ with_nested:
+ - masters_needing_certs
+ - - master.etcd-client.crt
+ - master.etcd-client.key
+
+ - name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+ -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
+ with_items: masters_needing_certs
+
+ - name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: masters_needing_certs
+
+
+- name: Sync generated certs, update service config and restart master services
+ hosts: oo_masters_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ tasks:
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+ dest: "{{ master_cert_config_dir }}"
+ when: inventory_hostname != groups.oo_first_master.0
+
+ - name: Restart master service
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+ - name: Ensure the master service is enabled
+ service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
+ when: not openshift_master_ha | bool
+
+ - name: Check for configured cluster
+ stat:
+ path: /etc/corosync/corosync.conf
+ register: corosync_conf
+ when: openshift_master_ha | bool
+
+ - name: Destroy cluster
+ command: pcs cluster destroy --all
+ when: openshift_master_ha | bool and corosync_conf.stat.exists == true
+ run_once: true
+
+ - name: Start pcsd
+ service: name=pcsd enabled=yes state=started
+ when: openshift_master_ha | bool
+
+
+- name: Re-create cluster
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool
+
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - name: Ensure node service enabled
+ service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+
+
+###############################################################################
+# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+ - name: Restart master cluster
+ command: pcs resource restart master
+ when: openshift_master_ha | bool
+ run_once: true
+
+ - name: Wait for the clustered master service to be available
+ wait_for:
+ host: "{{ openshift_master_cluster_vip }}"
+ port: 8443
+ state: started
+ timeout: 180
+ delay: 90
+ when: openshift_master_ha | bool
+ run_once: true
+
+
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+  # Create the new templates shipped in 3.1; existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: _default_router.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - _default_router.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+
+ - name: Switch to hostNetwork=true
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 952960652..ed23ada88 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -13,6 +13,8 @@
hostname: "{{ openshift_hostname | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ - role: etcd
+ local_facts: {}
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 0a3fe90e1..1b3fba3aa 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -34,7 +34,9 @@
- role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
- role: master
local_facts:
@@ -44,12 +46,14 @@
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ - role: etcd
+ local_facts: {}
+ when: openshift.master.embedded_etcd | bool
- name: Check status of external etcd certificatees
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -137,6 +141,7 @@
openshift_master_certs_no_etcd:
- admin.crt
- master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- master.server.crt
- openshift-master.crt
- openshift-registry.crt
@@ -144,6 +149,7 @@
- etcd.server.crt
openshift_master_certs_etcd:
- master.etcd-client.crt
+
- set_fact:
openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
@@ -153,9 +159,9 @@
with_items: openshift_master_certs
register: g_master_cert_stat_result
- set_fact:
- master_certs_missing: "{{ g_master_cert_stat_result.results
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
| map(attribute='stat.exists')
- | list | intersect([false])}}"
+ | list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -166,6 +172,10 @@
masters_needing_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
| oo_filter_list(filter_attr='master_certs_missing') }}"
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- openshift_master_certificates
@@ -187,6 +197,7 @@
args:
creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
with_items: masters_needing_certs
+
- name: Retrieve the master cert tarball from the master
fetch:
src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
@@ -196,12 +207,84 @@
validate_checksum: yes
with_items: masters_needing_certs
+- name: Inspect named certificates
+ hosts: oo_first_master
+ tasks:
+ - name: Collect certificate names
+ set_fact:
+ parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+ when: openshift_master_named_certificates is defined
+
+- name: Compute haproxy_backend_servers
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - set_fact:
+ haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}"
+
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
+ default_backend: atomic-openshift-api
+ haproxy_backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers: "{{ hostvars.localhost.haproxy_backend_servers }}"
+ roles:
+ - role: haproxy
+ when: groups.oo_masters_to_config | length > 1
+
+- name: Generate master session keys
+ hosts: oo_first_master
+ tasks:
+ - fail:
+ msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set"
+ when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined)
+ - fail:
+ msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
+ when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
+ - name: Generate session authentication key
+ command: /usr/bin/openssl rand -base64 24
+ register: session_auth_output
+ with_sequence: count=1
+ when: openshift_master_session_auth_secrets is undefined
+ - name: Generate session encryption key
+ command: /usr/bin/openssl rand -base64 24
+ register: session_encryption_output
+ with_sequence: count=1
+ when: openshift_master_session_encryption_secrets is undefined
+ - set_fact:
+ session_auth_secret: "{{ openshift_master_session_auth_secrets
+ | default(session_auth_output.results
+ | map(attribute='stdout')
+ | list) }}"
+ session_encryption_secret: "{{ openshift_master_session_encryption_secrets
+ | default(session_encryption_output.results
+ | map(attribute='stdout')
+ | list) }}"
+
- name: Configure master instances
hosts: oo_masters_to_config
+ serial: 1
vars:
+ named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
+ openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}"
+ openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -230,11 +313,25 @@
omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
roles:
- role: openshift_master_cluster
- when: openshift_master_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- openshift_examples
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
+- name: Determine cluster dns ip
+ hosts: oo_first_master
+ tasks:
+ - name: Get master service ip
+ command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}"
+ register: master_service_ip_output
+ when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - set_fact:
+ cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - set_fact:
+ cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
+ when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+
- name: Enable cockpit
hosts: oo_first_master
vars:
@@ -244,6 +341,14 @@
when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
+- name: Configure flannel
+ hosts: oo_first_master
+ vars:
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ roles:
+ - role: flannel_register
+ when: openshift.common.use_flannel | bool
+
# Additional instance config for online deployments
- name: Additional instance config
hosts: oo_masters_deployment_type_online
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index a14ca8e11..8da9e231f 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -38,6 +38,22 @@
node_subdir: node-{{ openshift.common.hostname }}
config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
node_cert_dir: "{{ openshift.common.config_base }}/node"
+ - name: Check status of flannel external etcd certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-ca.crt
+ register: g_external_etcd_flannel_cert_stat_result
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+ - set_fact:
+ etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
+ | map(attribute='stat.exists')
+ | list | intersect([false])}}"
+ etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+ etcd_cert_prefix: node.etcd-
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Create temp directory for syncing certs
hosts: localhost
@@ -50,6 +66,64 @@
register: mktemp
changed_when: False
+- name: Configure flannel etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ pre_tasks:
+ - set_fact:
+ etcd_needing_client_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ roles:
+ - role: etcd_certificates
+ post_tasks:
+ - name: Create a tarball of the etcd flannel certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
+- name: Copy the external etcd flannel certs to the nodes
+ hosts: oo_nodes_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/node"
+ state: directory
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+    - name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - file:
+ path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-client.key
+ - node.etcd-ca.crt
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
- name: Create node certificates
hosts: oo_first_master
vars:
@@ -84,6 +158,8 @@
vars:
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -100,6 +176,8 @@
when: certs_missing
roles:
- openshift_node
+ - role: flannel
+ when: openshift.common.use_flannel | bool
- role: nickhammond.logrotate
- role: fluentd_node
when: openshift.common.use_fluentd | bool
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 6ca4f7395..745161bcb 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -16,6 +16,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
index 0dfa3e9d7..c8f6065cd 100644
--- a/playbooks/gce/openshift-cluster/join_node.yml
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -46,4 +46,4 @@
openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index c22b897d5..8be5d53e7 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -9,7 +9,7 @@
- fail: msg="Deployment type not supported for gce provider yet"
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -17,7 +17,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -28,7 +28,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index c208eee81..4d1ae22ff 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -15,6 +15,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index d3e768de5..8d7949dd1 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -17,7 +17,7 @@
- include: tasks/configure_libvirt.yml
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -25,7 +25,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -33,7 +33,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -44,7 +44,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index eacae7c7e..e0c966e45 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,11 @@ system_info:
ssh_authorized_keys:
- {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ content: |
+ Defaults:openshift !requiretty
+
runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index a5ee2d6a5..888804e28 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -10,6 +10,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 49207c428..88e4ff874 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -17,7 +17,7 @@ TODO
Dependencies
------------
-None
+etcd-common
Example Playbook
----------------
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 0f216b84e..0fd3de585 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -2,16 +2,8 @@
etcd_interface: "{{ ansible_default_ipv4.interface }}"
etcd_client_port: 2379
etcd_peer_port: 2380
-etcd_peers_group: etcd
etcd_url_scheme: http
etcd_peer_url_scheme: http
-etcd_conf_dir: /etc/etcd
-etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
-etcd_key_file: "{{ etcd_conf_dir }}/server.key"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index b897913f9..4c0efb97b 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -1,3 +1,4 @@
---
- name: restart etcd
service: name=etcd state=restarted
+ when: not etcd_service_status_changed | default(false)
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 92d44ef4d..a71b36237 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -17,4 +17,4 @@ galaxy_info:
- system
dependencies:
- { role: os_firewall }
-- { role: openshift_repos }
+- { role: etcd_common }
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 656901409..fcbdecd37 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,4 +1,12 @@
---
+- fail:
+ msg: Interface {{ etcd_interface }} not found
+ when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]"
+
+- fail:
+ msg: IPv4 address not found for {{ etcd_interface }}
+ when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
+
- name: Install etcd
yum: pkg=etcd-2.* state=present
@@ -49,5 +57,5 @@
enabled: yes
register: start_result
-- pause: seconds=30
- when: start_result | changed
+- set_fact:
+    etcd_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 9ac23b1dd..32577c96c 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -1,9 +1,9 @@
{% macro initial_cluster() -%}
{% for host in groups[etcd_peers_group] -%}
{% if loop.last -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}
+{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}
{%- else -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }},
+{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
{% endmacro -%}
diff --git a/roles/etcd_ca/meta/main.yml b/roles/etcd_ca/meta/main.yml
index fb9280c9e..d02456ca3 100644
--- a/roles/etcd_ca/meta/main.yml
+++ b/roles/etcd_ca/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_repos }
+- { role: etcd_common }
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index 625756867..d32f5e48c 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,14 +1,14 @@
---
- file:
- path: "{{ etcd_ca_dir }}/{{ item }}"
+ path: "{{ item }}"
state: directory
mode: 0700
owner: root
group: root
with_items:
- - certs
- - crl
- - fragments
+ - "{{ etcd_ca_new_certs_dir }}"
+ - "{{ etcd_ca_crl_dir }}"
+ - "{{ etcd_ca_dir }}/fragments"
- command: cp /etc/pki/tls/openssl.cnf ./
args:
@@ -22,25 +22,25 @@
- assemble:
src: "{{ etcd_ca_dir }}/fragments"
- dest: "{{ etcd_ca_dir }}/openssl.cnf"
+ dest: "{{ etcd_openssl_conf }}"
-- command: touch index.txt
+- command: touch {{ etcd_ca_db }}
args:
- chdir: "{{ etcd_ca_dir }}"
- creates: "{{ etcd_ca_dir }}/index.txt"
+ creates: "{{ etcd_ca_db }}"
- copy:
- dest: "{{ etcd_ca_dir }}/serial"
+ dest: "{{ etcd_ca_serial }}"
content: "01"
force: no
- command: >
- openssl req -config openssl.cnf -newkey rsa:4096
- -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self
- -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
- -days 365
+ openssl req -config {{ etcd_openssl_conf }} -newkey rsa:4096
+ -keyout {{ etcd_ca_key }} -new -out {{ etcd_ca_cert }}
+ -x509 -extensions {{ etcd_ca_exts_self }} -batch -nodes
+ -days {{ etcd_ca_default_days }}
+ -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
args:
chdir: "{{ etcd_ca_dir }}"
- creates: "{{ etcd_ca_dir }}/ca.crt"
+ creates: "{{ etcd_ca_cert }}"
environment:
- SAN: ''
+ SAN: 'etcd-signer'
diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd_ca/templates/openssl_append.j2
index de2adaead..f28316fc2 100644
--- a/roles/etcd_ca/templates/openssl_append.j2
+++ b/roles/etcd_ca/templates/openssl_append.j2
@@ -1,20 +1,20 @@
-[ etcd_v3_req ]
+[ {{ etcd_req_ext }} ]
basicConstraints = critical,CA:FALSE
keyUsage = digitalSignature,keyEncipherment
subjectAltName = ${ENV::SAN}
-[ etcd_ca ]
+[ {{ etcd_ca_name }} ]
dir = {{ etcd_ca_dir }}
-crl_dir = $dir/crl
-database = $dir/index.txt
-new_certs_dir = $dir/certs
-certificate = $dir/ca.crt
-serial = $dir/serial
-private_key = $dir/ca.key
-crl_number = $dir/crlnumber
-x509_extensions = etcd_v3_ca_client
-default_days = 365
+crl_dir = {{ etcd_ca_crl_dir }}
+database = {{ etcd_ca_db }}
+new_certs_dir = {{ etcd_ca_new_certs_dir }}
+certificate = {{ etcd_ca_cert }}
+serial = {{ etcd_ca_serial }}
+private_key = {{ etcd_ca_key }}
+crl_number = {{ etcd_ca_crl_number }}
+x509_extensions = {{ etcd_ca_exts_client }}
+default_days = {{ etcd_ca_default_days }}
default_md = sha256
preserve = no
name_opt = ca_default
@@ -23,27 +23,27 @@ policy = policy_anything
unique_subject = no
copy_extensions = copy
-[ etcd_v3_ca_self ]
+[ {{ etcd_ca_exts_self }} ]
authorityKeyIdentifier = keyid,issuer
basicConstraints = critical,CA:TRUE,pathlen:0
keyUsage = critical,digitalSignature,keyEncipherment,keyCertSign
subjectKeyIdentifier = hash
-[ etcd_v3_ca_peer ]
+[ {{ etcd_ca_exts_peer }} ]
authorityKeyIdentifier = keyid,issuer:always
basicConstraints = critical,CA:FALSE
extendedKeyUsage = clientAuth,serverAuth
keyUsage = digitalSignature,keyEncipherment
subjectKeyIdentifier = hash
-[ etcd_v3_ca_server ]
+[ {{ etcd_ca_exts_server }} ]
authorityKeyIdentifier = keyid,issuer:always
basicConstraints = critical,CA:FALSE
extendedKeyUsage = serverAuth
keyUsage = digitalSignature,keyEncipherment
subjectKeyIdentifier = hash
-[ etcd_v3_ca_client ]
+[ {{ etcd_ca_exts_client }} ]
authorityKeyIdentifier = keyid,issuer:always
basicConstraints = critical,CA:FALSE
extendedKeyUsage = clientAuth
diff --git a/roles/etcd_ca/vars/main.yml b/roles/etcd_ca/vars/main.yml
deleted file mode 100644
index 901e95027..000000000
--- a/roles/etcd_ca/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-etcd_conf_dir: /etc/etcd
-etcd_ca_dir: /etc/etcd/ca
diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml
index 28f33f442..6aa4883e0 100644
--- a/roles/etcd_certificates/tasks/client.yml
+++ b/roles/etcd_certificates/tasks/client.yml
@@ -32,7 +32,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.crt' }}"
environment:
- SAN: ''
+ SAN: "IP:{{ item.openshift.common.ip }}"
with_items: etcd_needing_client_certs
- file:
diff --git a/roles/etcd_certificates/tasks/main.yml b/roles/etcd_certificates/tasks/main.yml
index da875e8ea..3bb715943 100644
--- a/roles/etcd_certificates/tasks/main.yml
+++ b/roles/etcd_certificates/tasks/main.yml
@@ -4,6 +4,3 @@
- include: server.yml
when: etcd_needing_server_certs is defined and etcd_needing_server_certs
-
-
-
diff --git a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml
index 727b7fa2c..3499dcbef 100644
--- a/roles/etcd_certificates/tasks/server.yml
+++ b/roles/etcd_certificates/tasks/server.yml
@@ -18,7 +18,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- name: Sign and create the server crt
@@ -32,7 +32,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.crt' }}"
environment:
- SAN: ''
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- name: Create the peer csr
@@ -47,7 +47,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- name: Sign and create the peer crt
@@ -61,7 +61,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.crt' }}"
environment:
- SAN: ''
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- file:
@@ -69,5 +69,3 @@
dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
state: hard
with_items: etcd_needing_server_certs
-
-
diff --git a/roles/etcd_certificates/vars/main.yml b/roles/etcd_certificates/vars/main.yml
deleted file mode 100644
index 0eaeeb82b..000000000
--- a/roles/etcd_certificates/vars/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-etcd_conf_dir: /etc/etcd
-etcd_ca_dir: /etc/etcd/ca
-etcd_generated_certs_dir: /etc/etcd/generated_certs
-etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
-etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
-etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
-etcd_ca_name: etcd_ca
-etcd_req_ext: etcd_v3_req
-etcd_ca_exts_peer: etcd_v3_ca_peer
-etcd_ca_exts_server: etcd_v3_ca_server
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
new file mode 100644
index 000000000..131a01490
--- /dev/null
+++ b/roles/etcd_common/README.md
@@ -0,0 +1,34 @@
+etcd_common
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+openshift_repos
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
new file mode 100644
index 000000000..96f4b63af
--- /dev/null
+++ b/roles/etcd_common/defaults/main.yml
@@ -0,0 +1,30 @@
+---
+etcd_peers_group: etcd
+
+# etcd server vars
+etcd_conf_dir: /etc/etcd
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+# etcd ca vars
+etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
+etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server
+etcd_ca_exts_self: etcd_v3_ca_self
+etcd_ca_exts_client: etcd_v3_ca_client
+etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
+etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
+etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
+etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
+etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
+etcd_ca_default_days: 365
diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml
new file mode 100644
index 000000000..fb9280c9e
--- /dev/null
+++ b/roles/etcd_common/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_repos }
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..cd108495d
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- set_fact:
+ etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}"
+
+- fail:
+ msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}"
+ when: "'etcd_interface' in item.value and 'interface' not in item.value"
+ with_dict: etcd_host_int_map
+
+- fail:
+ msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }}
+ when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4"
+ with_dict: etcd_host_int_map
diff --git a/roles/etcd_common/templates/host_int_map.j2 b/roles/etcd_common/templates/host_int_map.j2
new file mode 100644
index 000000000..9c9c76413
--- /dev/null
+++ b/roles/etcd_common/templates/host_int_map.j2
@@ -0,0 +1,13 @@
+---
+{% for host in groups[etcd_peers_group] %}
+{% set entry=hostvars[host] %}
+{{ entry.inventory_hostname }}:
+{% if 'etcd_interface' in entry %}
+ etcd_interface: {{ entry.etcd_interface }}
+{% if entry.etcd_interface in entry.ansible_interfaces %}
+ interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }}
+{% endif %}
+{% else %}
+ interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }}
+{% endif %}
+{% endfor %}
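For illustration, a sketch of the fact this template is expected to yield for two etcd hosts (hostnames, interface names, and addresses below are hypothetical):

---
# Hypothetical etcd_host_int_map rendering; one host pins etcd_interface,
# the other falls back to its default IPv4 interface.
etcd1.example.com:
  etcd_interface: eth1
  interface:
    device: eth1
    ipv4:
      address: 192.0.2.10
etcd2.example.com:
  interface:
    device: eth0
    ipv4:
      address: 192.0.2.11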
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
new file mode 100644
index 000000000..b8aa830ac
--- /dev/null
+++ b/roles/flannel/README.md
@@ -0,0 +1,45 @@
+flannel
+=======
+
+Configure flannel on openshift nodes
+
+Requirements
+------------
+
+This role assumes it is being deployed on a RHEL/Fedora based host with the
+'flannel' package, version 0.3 or newer, available via yum.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|---------------------|-----------------------------------------|-----------------------------------------------|
+| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd |
+| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd |
+
+Dependencies
+------------
+
+openshift_facts
+
+Example Playbook
+----------------
+
+ - hosts: openshift_node
+ roles:
+ - { role: flannel, etcd_urls: ['https://127.0.0.1:2379'] }
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Sylvain Baubeau <sbaubeau@redhat.com>
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
new file mode 100644
index 000000000..34cebda9c
--- /dev/null
+++ b/roles/flannel/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
+flannel_interface: "{{ ansible_default_ipv4.interface }}"
+flannel_etcd_key: /openshift.com/network
+etcd_hosts: "{{ etcd_urls }}"
+etcd_conf_dir: "{{ openshift.common.config_base }}/node"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
new file mode 100644
index 000000000..f9b9ae7f1
--- /dev/null
+++ b/roles/flannel/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+- name: restart flanneld
+ sudo: true
+ service: name=flanneld state=restarted
+
+- name: restart docker
+ sudo: true
+ service: name=docker state=restarted
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
new file mode 100644
index 000000000..909bdbfa4
--- /dev/null
+++ b/roles/flannel/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Sylvain
+ description: flannel management
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
new file mode 100644
index 000000000..acfb009ec
--- /dev/null
+++ b/roles/flannel/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: Install flannel
+ sudo: true
+ yum: pkg=flannel state=present
+
+- name: Set flannel etcd url
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^(FLANNEL_ETCD=)"
+ line: '\1{{ etcd_hosts|join(",") }}'
+
+- name: Set flannel etcd key
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^(FLANNEL_ETCD_KEY=)"
+ line: '\1{{ flannel_etcd_key }}'
+
+- name: Set flannel options
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^#?(FLANNEL_OPTIONS=)"
+ line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}'
+
+- name: Enable flanneld
+ sudo: true
+ service:
+ name: flanneld
+ state: started
+ enabled: yes
+ register: start_result
+
+- name: Remove docker bridge ip
+ sudo: true
+ shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0
+ notify:
+ - restart docker
+ - restart node
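A minimal playbook sketch for applying this role, mirroring the README example above (the etcd endpoint is a placeholder):

---
# Hypothetical play; the flannel role tasks above escalate privileges
# themselves via sudo, so no play-level sudo is required.
- hosts: openshift_node
  roles:
  - { role: flannel, etcd_urls: ['https://127.0.0.1:2379'] }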
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
new file mode 100644
index 000000000..ba7541ab1
--- /dev/null
+++ b/roles/flannel_register/README.md
@@ -0,0 +1,47 @@
+flannel_register
+================
+
+Register flannel configuration into etcd
+
+Requirements
+------------
+
+This role assumes it is being deployed on a RHEL/Fedora based host with the
+'flannel' package, version 0.3 or newer, available via yum.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|---------------------|----------------------------------------------------|-------------------------------------------------|
+| flannel_network     | {{ openshift.master.portal_net }} or 172.30.0.0/16 | network from which flannel subnets are allocated |
+| flannel_min_network | 172.30.5.0                                          | beginning of IP range for the subnet allocation  |
+| flannel_subnet_len  | 24                                                  | size of the subnet allocated to each host        |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_conf_dir | {{ openshift.common.config_base }}/master | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | {{ etcd_conf_dir }}/master.etcd-client.crt | SSL cert to use for etcd |
+| etcd_peer_key_file | {{ etcd_conf_dir }}/master.etcd-client.key | SSL key to use for etcd |
+
+Dependencies
+------------
+
+openshift_facts
+
+Example Playbook
+----------------
+
+ - hosts: openshift_master
+ roles:
+ - { role: flannel_register }
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Sylvain Baubeau <sbaubeau@redhat.com>
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
new file mode 100644
index 000000000..269d1a17c
--- /dev/null
+++ b/roles/flannel_register/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}"
+flannel_min_network: 172.30.5.0
+flannel_subnet_len: 24
+flannel_etcd_key: /openshift.com/network
+etcd_hosts: "{{ etcd_urls }}"
+etcd_conf_dir: "{{ openshift.common.config_base }}/master"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
+
diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml
new file mode 100644
index 000000000..73bddcca4
--- /dev/null
+++ b/roles/flannel_register/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Sylvain
+ description: register flannel configuration into etcd
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml
new file mode 100644
index 000000000..1629157c8
--- /dev/null
+++ b/roles/flannel_register/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Ensure /etc/flannel directory exists
+ sudo: true
+ file: path=/etc/flannel state=directory
+
+- name: Generate flannel configuration for etcd
+ sudo: true
+ template:
+ src: "flannel-config.json"
+ dest: "/etc/flannel/config.json"
+
+- name: Insert flannel configuration into etcd
+ sudo: true
+ command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json'
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
new file mode 100644
index 000000000..89ce4c30b
--- /dev/null
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -0,0 +1,8 @@
+{
+ "Network": "{{ flannel_network }}",
+ "SubnetLen": {{ flannel_subnet_len }},
+ "SubnetMin": "{{ flannel_min_network }}",
+ "Backend": {
+ "Type": "host-gw"
+ }
+}
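As a sketch, the values that feed this template can be overridden when the role is applied; the CIDR, subnet length, and minimum network below are illustrative, not recommendations:

---
# Hypothetical override of the flannel_register defaults.
- hosts: openshift_master
  roles:
  - role: flannel_register
    flannel_network: 10.1.0.0/16
    flannel_subnet_len: 24
    flannel_min_network: 10.1.1.0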
diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md
new file mode 100644
index 000000000..5bc415066
--- /dev/null
+++ b/roles/haproxy/README.md
@@ -0,0 +1,34 @@
+HAProxy
+=======
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml
new file mode 100644
index 000000000..7ba5bd485
--- /dev/null
+++ b/roles/haproxy/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+haproxy_frontends:
+- name: main
+ binds:
+ - "*:80"
+ default_backend: default
+
+haproxy_backends:
+- name: default
+ balance: roundrobin
+ servers:
+ - name: web01
+ address: 127.0.0.1:9000
+ opts: check
+
+os_firewall_use_firewalld: False
+os_firewall_allow:
+- service: haproxy stats
+ port: "9000/tcp"
+- service: haproxy balance
+ port: "8443/tcp"
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
new file mode 100644
index 000000000..ee60adcab
--- /dev/null
+++ b/roles/haproxy/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart haproxy
+ service:
+ name: haproxy
+ state: restarted
diff --git a/roles/haproxy/meta/main.yml b/roles/haproxy/meta/main.yml
new file mode 100644
index 000000000..0fad106a9
--- /dev/null
+++ b/roles/haproxy/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: HAProxy
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- { role: os_firewall }
+- { role: openshift_repos }
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
new file mode 100644
index 000000000..5638b7313
--- /dev/null
+++ b/roles/haproxy/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Install haproxy
+ yum:
+ pkg: haproxy
+ state: present
+
+- name: Configure haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ owner: root
+ group: root
+ mode: 0644
+ notify: restart haproxy
+
+- name: Enable and start haproxy
+ service:
+ name: haproxy
+ state: started
+ enabled: yes
+ register: start_result
+
+- name: Pause 30 seconds if haproxy was just started
+ pause: seconds=30
+ when: start_result | changed
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2
new file mode 100644
index 000000000..c932af72f
--- /dev/null
+++ b/roles/haproxy/templates/haproxy.cfg.j2
@@ -0,0 +1,76 @@
+# Global settings
+#---------------------------------------------------------------------
+global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 300s
+ timeout server 300s
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+listen stats :9000
+ mode http
+ stats enable
+ stats uri /
+
+{% for frontend in haproxy_frontends %}
+frontend {{ frontend.name }}
+{% for bind in frontend.binds %}
+ bind {{ bind }}
+{% endfor %}
+ default_backend {{ frontend.default_backend }}
+{% if 'mode' in frontend %}
+ mode {{ frontend.mode }}
+{% endif %}
+{% if 'options' in frontend %}
+{% for option in frontend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% if 'redirects' in frontend %}
+{% for redirect in frontend.redirects %}
+ redirect {{ redirect }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+
+{% for backend in haproxy_backends %}
+backend {{ backend.name }}
+ balance {{ backend.balance }}
+{% if 'mode' in backend %}
+ mode {{ backend.mode }}
+{% endif %}
+{% if 'options' in backend %}
+{% for option in backend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% for server in backend.servers %}
+ server {{ server.name }} {{ server.address }} {{ server.opts }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 56c69c286..1520f79b2 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -44,6 +44,9 @@ kubernetes_url: https://10.245.1.2:6443
# Token to use for authentication to the API server
kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+# API Version to use for kubernetes
+kube_api_version: v1
```
## Dependencies
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
index e296492f9..bdd994d07 100644
--- a/roles/kube_nfs_volumes/defaults/main.yml
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -1,4 +1,10 @@
---
+kubernetes_url: https://172.30.0.1:443
+
+kube_api_version: v1
+
+kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2"
+
# Options of NFS exports.
nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index f4a506234..d1dcf261a 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -16,10 +16,11 @@
- include: nfs.yml
- name: export physical volumes
- uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
- method=POST
- body='{{ lookup("template", "../templates/nfs.json.j2") }}'
- body_format=json
- status_code=201
- HEADER_Authorization="Bearer {{ kubernetes_token }}"
+ uri:
+ url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes"
+ method: POST
+ body: "{{ lookup('template', kube_req_template) }}"
+ body_format: json
+ status_code: 201
+ HEADER_Authorization: "Bearer {{ kubernetes_token }}"
with_items: partition_pool
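For context, a minimal sketch of applying the role with the variables its README documents (the URL and token are the README's placeholders, and the host group is assumed):

---
# Hypothetical kube_nfs_volumes invocation; values are placeholders.
- hosts: nfs_servers
  roles:
  - role: kube_nfs_volumes
    kubernetes_url: https://10.245.1.2:6443
    kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
    kube_api_version: v1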
diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
new file mode 120000
index 000000000..49c1191bc
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
@@ -0,0 +1 @@
+../v1beta3/nfs.json.j2 \ No newline at end of file
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
index b42886ef1..b42886ef1 100644
--- a/roles/kube_nfs_volumes/templates/nfs.json.j2
+++ b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py
index 2cd00dd27..5dc3cff9b 100644
--- a/roles/lib_zabbix/library/zbx_item.py
+++ b/roles/lib_zabbix/library/zbx_item.py
@@ -107,6 +107,39 @@ def get_multiplier(inval):
return rval, 0
+def get_zabbix_type(ztype):
+ '''
+ Determine the Zabbix item type
+ '''
+ _types = {'agent': 0,
+ 'SNMPv1': 1,
+ 'trapper': 2,
+ 'simple': 3,
+ 'SNMPv2': 4,
+ 'internal': 5,
+ 'SNMPv3': 6,
+ 'active': 7,
+ 'aggregate': 8,
+ 'web': 9,
+ 'external': 10,
+ 'database monitor': 11,
+ 'ipmi': 12,
+ 'ssh': 13,
+ 'telnet': 14,
+ 'calculated': 15,
+ 'JMX': 16,
+ 'SNMP trap': 17,
+ }
+
+ for typ in _types.keys():
+ if ztype in typ or ztype == typ:
+ _vtype = _types[typ]
+ break
+ else:
+ _vtype = 2
+
+ return _vtype
+
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
@@ -123,7 +156,7 @@ def main():
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
template_name=dict(default=None, type='str'),
- zabbix_type=dict(default=2, type='int'),
+ zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='int', type='str'),
interval=dict(default=60, type='int'),
delta=dict(default=0, type='int'),
@@ -184,7 +217,7 @@ def main():
params = {'name': module.params.get('name', module.params['key']),
'key_': module.params['key'],
'hostid': templateid[0],
- 'type': module.params['zabbix_type'],
+ 'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(module.params['applications'], app_name_ids),
'formula': formula,
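With this change, zabbix_type is passed by name and resolved to the numeric Zabbix type by get_zabbix_type(); a hedged example task, with the server, credentials, template, and key names as placeholders:

---
# Hypothetical zbx_item task using a named type instead of an integer.
- name: Create a trapper item
  zbx_item:
    zbx_server: https://localhost/zabbix/api_jsonrpc.php
    zbx_user: "{{ zbx_user }}"
    zbx_password: "{{ zbx_password }}"
    name: Example item
    key: example.item.count
    template_name: Template Example
    applications:
    - Example
    zabbix_type: trapper
    value_type: int
    interval: 60
    delta: 0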
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index e7fd6fa21..43498c015 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -67,7 +67,24 @@ def get_template(zapi, template_name):
return None
return content['result'][0]
-def get_type(ztype):
+def get_multiplier(inval):
+ ''' Determine the multiplier
+ '''
+ if inval == None or inval == '':
+ return None, 0
+
+ rval = None
+ try:
+ rval = int(inval)
+ except ValueError:
+ pass
+
+ if rval:
+ return rval, 1
+
+ return rval, 0
+
+def get_zabbix_type(ztype):
'''
Determine which type of discoverrule this is
'''
@@ -87,6 +104,7 @@ def get_type(ztype):
'telnet': 14,
'calculated': 15,
'JMX': 16,
+ 'SNMP trap': 17,
}
for typ in _types.keys():
@@ -153,16 +171,21 @@ def main():
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
description=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
interfaceid=dict(default=None, type='int'),
- ztype=dict(default='trapper', type='str'),
+ zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
state=dict(default='present', type='str'),
status=dict(default='enabled', type='str'),
applications=dict(default=[], type='list'),
- template_name=dict(default=None, type='str'),
discoveryrule_key=dict(default=None, type='str'),
+ interval=dict(default=60, type='int'),
+ delta=dict(default=0, type='int'),
+ multiplier=dict(default=None, type='str'),
+ units=dict(default=None, type='str'),
+
),
#supports_check_mode=True
)
@@ -205,15 +228,23 @@ def main():
# Create and Update
if state == 'present':
+
+ formula, use_multiplier = get_multiplier(module.params['multiplier'])
+
params = {'name': module.params['name'],
'key_': module.params['key'],
'hostid': template['templateid'],
'interfaceid': module.params['interfaceid'],
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
- 'type': get_type(module.params['ztype']),
+ 'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
+ 'formula': formula,
+ 'multiplier': use_multiplier,
'description': module.params['description'],
+ 'units': module.params['units'],
+ 'delay': module.params['interval'],
+ 'delta': module.params['delta'],
}
if params['type'] in [2, 5, 7, 8, 11, 15]:
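Correspondingly, a sketch of an item prototype task exercising the renamed zabbix_type and the new multiplier, units, interval, and delta parameters (the discovery rule key and the {#OSO_DISK} macro are illustrative):

---
# Hypothetical zbx_itemprototype task; parameter names follow the
# argument_spec above, values are placeholders.
- name: Create a disk usage item prototype
  zbx_itemprototype:
    zbx_server: https://localhost/zabbix/api_jsonrpc.php
    zbx_user: "{{ zbx_user }}"
    zbx_password: "{{ zbx_password }}"
    name: "Example disk usage on {#OSO_DISK}"
    key: "example.disk.usage[{#OSO_DISK}]"
    template_name: Template Example
    discoveryrule_key: example.disc.disk
    applications:
    - Example
    zabbix_type: trapper
    value_type: float
    multiplier: "1024"
    units: B
    interval: 60
    delta: 0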
diff --git a/roles/lib_zabbix/library/zbx_itservice.py b/roles/lib_zabbix/library/zbx_itservice.py
new file mode 100644
index 000000000..a5ee97e15
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_itservice.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix itservices
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix itservice ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content and content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_parent(dependencies):
+ '''Return the parent dependency, if any'''
+ rval = None
+ for dep in dependencies:
+ if dep['relationship'] == 'parent':
+ return dep
+ return rval
+
+def format_dependencies(dependencies):
+ '''Put dependencies into the proper update format'''
+ rval = []
+ for dep in dependencies:
+ rval.append({'dependsOnServiceid': dep['serviceid'],
+ 'soft': get_dependency_type(dep['dep_type']),
+ })
+
+ return rval
+
+def get_dependency_type(dep_type):
+ '''Determine the dependency type'''
+ rval = 0
+ if 'soft' == dep_type:
+ rval = 1
+
+ return rval
+
+def get_service_id_by_name(zapi, dependencies):
+ '''Fetch the service id for an itservice'''
+ deps = []
+ for dep in dependencies:
+ if dep['name'] == 'root':
+ deps.append(dep)
+ continue
+
+ content = zapi.get_content('service',
+ 'get',
+ {'filter': {'name': dep['name']},
+ 'selectDependencies': 'extend',
+ })
+ if content.has_key('result') and content['result']:
+ dep['serviceid'] = content['result'][0]['serviceid']
+ deps.append(dep)
+
+ return deps
+
+def add_dependencies(zapi, service_name, dependencies):
+ '''Fetch the service id for an itservice
+
+ Add a dependency on the parent for this current service item.
+ '''
+
+ results = get_service_id_by_name(zapi, [{'name': service_name}])
+
+ content = {}
+ for dep in dependencies:
+ content = zapi.get_content('service',
+ 'adddependencies',
+ {'serviceid': results[0]['serviceid'],
+ 'dependsOnServiceid': dep['serviceid'],
+ 'soft': get_dependency_type(dep['dep_type']),
+ })
+ if content.has_key('result') and content['result']:
+ continue
+ else:
+ break
+
+ return content
+
+def get_show_sla(inc_sla):
+ ''' Determine the showsla parameter
+ '''
+ rval = 1
+ if 'do not calculate' in inc_sla:
+ rval = 0
+ return rval
+
+def get_algorithm(inc_algorithm_str):
+ '''
+ Determine which type of algorithm to use
+ '''
+ rval = 0
+ if 'at least one' in inc_algorithm_str:
+ rval = 1
+ elif 'all' in inc_algorithm_str:
+ rval = 2
+
+ return rval
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_itservice
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ algorithm=dict(default='do not calculate', choices=['do not calculate', 'at least one', 'all'], type='str'),
+ show_sla=dict(default='calculate', choices=['do not calculate', 'calculate'], type='str'),
+ good_sla=dict(default='99.9', type='float'),
+ sort_order=dict(default=1, type='int'),
+ state=dict(default='present', type='str'),
+ trigger_id=dict(default=None, type='int'),
+ dependencies=dict(default=[], type='list'),
+ dep_type=dict(default='hard', choices=['hard', 'soft'], type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'service'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'name': module.params['name']},
+ 'selectDependencies': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['serviceid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ dependencies = get_service_id_by_name(zapi, module.params['dependencies'])
+ params = {'name': module.params['name'],
+ 'algorithm': get_algorithm(module.params['algorithm']),
+ 'showsla': get_show_sla(module.params['show_sla']),
+ 'goodsla': module.params['good_sla'],
+ 'sortorder': module.params['sort_order'],
+ 'triggerid': module.params['trigger_id']
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ if dependencies:
+ content = add_dependencies(zapi, module.params['name'], dependencies)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ params['dependencies'] = dependencies
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'goodsla':
+ if float(value) != float(zab_results[key]):
+ differences[key] = value
+
+ elif key == 'dependencies':
+ zab_dep_ids = [item['serviceid'] for item in zab_results[key]]
+ user_dep_ids = [item['serviceid'] for item in dependencies]
+ if set(zab_dep_ids) != set(user_dep_ids):
+ differences[key] = format_dependencies(dependencies)
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ differences['serviceid'] = zab_results['serviceid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. This are required
+from ansible.module_utils.basic import *
+
+main()
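A hedged example of driving the new module from a playbook; the service names and SLA values are illustrative:

---
# Hypothetical zbx_itservice usage; each dependency entry carries a name
# and a dep_type, as expected by get_service_id_by_name/add_dependencies.
- name: Create an IT service with a soft dependency
  zbx_itservice:
    zbx_server: https://localhost/zabbix/api_jsonrpc.php
    zbx_user: "{{ zbx_user }}"
    zbx_password: "{{ zbx_password }}"
    name: Example service
    algorithm: at least one
    show_sla: calculate
    good_sla: "99.9"
    sort_order: 1
    dependencies:
    - name: Example child service
      dep_type: soft
    state: present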
diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py
index ab7731faa..b5faefa70 100644
--- a/roles/lib_zabbix/library/zbx_trigger.py
+++ b/roles/lib_zabbix/library/zbx_trigger.py
@@ -136,6 +136,8 @@ def main():
status=dict(default=None, type='str'),
state=dict(default='present', type='str'),
template_name=dict(default=None, type='str'),
+ hostgroup_name=dict(default=None, type='str'),
+ query_type=dict(default='filter', choices=['filter', 'search'], type='str'),
),
#supports_check_mode=True
)
@@ -157,10 +159,11 @@ def main():
content = zapi.get_content(zbx_class_name,
'get',
- {'filter': {'description': tname},
+ {module.params['query_type']: {'description': tname},
'expandExpression': True,
'selectDependencies': 'triggerid',
'templateids': templateid,
+ 'group': module.params['hostgroup_name'],
})
# Get
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index d5168a9f4..44c4e6766 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -38,7 +38,7 @@
units: "{{ item.units | default('', True) }}"
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
- zabbix_type: "{{ item.zabbix_type | default(2, True) }}"
+ zabbix_type: "{{ item.zabbix_type | default('trapper') }}"
interval: "{{ item.interval | default(60, True) }}"
delta: "{{ item.delta | default(0, True) }}"
with_items: template.zitems
@@ -84,6 +84,10 @@
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
description: "{{ item.description | default('', True) }}"
+ multiplier: "{{ item.multiplier | default('', True) }}"
+ units: "{{ item.units | default('', True) }}"
+ interval: "{{ item.interval | default(60, True) }}"
+ delta: "{{ item.delta | default(0, True) }}"
with_items: template.zitemprototypes
when: template.zitemprototypes is defined
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 5fe77e38b..f6919dada 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -1,11 +1,16 @@
---
- yum:
- name: openshift-ansible-inventory
+ name: "{{ item }}"
state: present
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
-- template:
- src: multi_ec2.yaml.j2
- dest: /etc/ansible/multi_ec2.yaml
+- name: Write the multi_inventory accounts configuration
+ copy:
+ content: "{{ oo_inventory_accounts | to_nice_yaml }}"
+ dest: /etc/ansible/multi_inventory.yaml
group: "{{ oo_inventory_group }}"
owner: "{{ oo_inventory_owner }}"
mode: "0640"
@@ -19,17 +24,17 @@
- file:
state: link
- src: /usr/share/ansible/inventory/multi_ec2.py
- dest: /etc/ansible/inventory/multi_ec2.py
+ src: /usr/share/ansible/inventory/multi_inventory.py
+ dest: /etc/ansible/inventory/multi_inventory.py
owner: root
group: libra_ops
# This cron uses the above location to call its job
- name: Cron to keep cache fresh
cron:
- name: 'multi_ec2_inventory'
+ name: 'multi_inventory'
minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+ job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
- name: Set cache location
@@ -39,5 +44,5 @@
owner: root
group: libra_ops
recurse: yes
- mode: '2750'
+ mode: '2770'
when: oo_inventory_cache_location is defined
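With the template gone, the multi_inventory configuration is now written verbatim from oo_inventory_accounts; a sketch of that variable, assuming it keeps the account structure the multi_ec2.yaml.j2 template (removed below) consumed — all names and credentials are placeholders:

---
# Hypothetical oo_inventory_accounts entry mirroring the old template's
# name/provider/provider_config/env_vars layout.
oo_inventory_accounts:
- name: example-aws-account
  provider: aws/hosts/ec2.py
  provider_config:
    ec2:
      regions: us-east-1
  env_vars:
    AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
    AWS_SECRET_ACCESS_KEY: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
  all_group: ec2
  hostvars:
    cloud: aws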
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
deleted file mode 100644
index 8228ab915..000000000
--- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-# multi ec2 inventory configs
-cache_max_age: {{ oo_inventory_cache_max_age }}
-cache_location: {{ oo_inventory_cache_location | default('~/.ansible/tmp/multi_ec2_inventory.cache') }}
-accounts:
-{% for account in oo_inventory_accounts %}
- - name: {{ account.name }}
- provider: {{ account.provider }}
- provider_config:
-{% for section, items in account.provider_config.items() %}
- {{ section }}:
-{% for property, value in items.items() %}
- {{ property }}: {{ value }}
-{% endfor %}
-{% endfor %}
- env_vars:
- AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
-{% if account.all_group is defined and account.hostvars is defined%}
- all_group: {{ account.all_group }}
- hostvars:
-{% for property, value in account.hostvars.items() %}
- {{ property }}: {{ value }}
-{% endfor %}
-{% endif %}
-
-{% endfor %}
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 73bd28630..38d5a08e4 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,4 +1,8 @@
---
+- fail:
+ msg: Flannel can not be used with openshift sdn
+ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
+
- name: Set common Cluster facts
openshift_facts:
role: common
@@ -13,6 +17,7 @@
sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
+ use_flannel: "{{ openshift_use_flannel | default(None) }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
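The new guard makes flannel and openshift-sdn mutually exclusive; a sketch of cluster variables that select flannel (placing these in group_vars or the inventory is an assumption):

---
# Hypothetical settings: disable openshift-sdn and enable flannel so the
# fail task above does not trigger.
openshift_use_openshift_sdn: false
openshift_use_flannel: true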
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index 2043985ec..8e8bc6868 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -14,5 +14,7 @@ db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
quickstarts_base: "{{ examples_base }}/quickstart-templates"
+infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin"
+infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise"
openshift_examples_import_command: "create"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 21137e31b..a261a6ddd 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,17 +5,20 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
+XPAAS_VERSION=ose-v1.1.0
EXAMPLES_BASE=$(pwd)/files/examples
find files/examples -name '*.json' -delete
+find files/examples -name '*.yaml' -delete
TEMP=`mktemp -d`
pushd $TEMP
+
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/openshift/django-ex/archive/master.zip -O django-ex-master.zip
wget https://github.com/openshift/rails-ex/archive/master.zip -O rails-ex-master.zip
wget https://github.com/openshift/nodejs-ex/archive/master.zip -O nodejs-ex-master.zip
wget https://github.com/openshift/dancer-ex/archive/master.zip -O dancer-ex-master.zip
wget https://github.com/openshift/cakephp-ex/archive/master.zip -O cakephp-ex-master.zip
-wget https://github.com/jboss-openshift/application-templates/archive/ose-v1.0.2.zip -O application-templates-master.zip
+wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
unzip origin-master.zip
unzip django-ex-master.zip
unzip rails-ex-master.zip
@@ -31,7 +34,13 @@ cp rails-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp nodejs-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp dancer-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp cakephp-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
-mv application-templates-master/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
-find application-templates-master/ -name '*.json' ! -wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
+find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+
+wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
+cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
+
popd
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
index f213d99ca..1a78b1279 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
@@ -11,10 +11,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/ruby-20-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.0"
+ }
},
{
"name": "2.0",
@@ -23,11 +26,27 @@
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.0,ruby",
- "version": "2.0"
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/ruby-20-centos7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "description": "Build and run Ruby 2.2 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/ruby-22-centos7:latest"
}
}
]
@@ -41,10 +60,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/nodejs-010-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "0.10"
+ }
},
{
"name": "0.10",
@@ -53,11 +75,12 @@
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
- "version": "0.10"
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/nodejs-010-centos7:latest"
}
}
]
@@ -71,10 +94,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/perl-516-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.16"
+ }
},
{
"name": "5.16",
@@ -83,12 +109,29 @@
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.16,perl",
- "version": "5.16"
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/perl-516-centos7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "description": "Build and run Perl 5.20 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/perl-520-centos7:latest"
}
+
}
]
}
@@ -101,10 +144,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/php-55-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
},
{
"name": "5.5",
@@ -113,11 +159,27 @@
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.5,php",
- "version": "5.5"
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/php-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "description": "Build and run PHP 5.6 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/php-56-centos7:latest"
}
}
]
@@ -131,10 +193,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/python-33-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "3.3"
+ }
},
{
"name": "3.3",
@@ -143,11 +208,42 @@
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.3,python",
- "version": "3.3"
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/python-33-centos7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "description": "Build and run Python 2.7 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/python-27-centos7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "description": "Build and run Python 3.4 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/python-34-centos7:latest"
}
}
]
@@ -161,10 +257,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/wildfly-81-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "8.1"
+ }
},
{
"name": "8.1",
@@ -173,11 +272,12 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
- "version": "8.1"
+ "version": "8.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/wildfly-81-centos7:latest"
}
}
]
@@ -191,16 +291,26 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/mysql-55-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
},
{
"name": "5.5",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/mysql-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/mysql-56-centos7:latest"
}
}
]
@@ -214,16 +324,26 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/postgresql-92-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "9.2"
+ }
},
{
"name": "9.2",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/postgresql-92-centos7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/postgresql-94-centos7:latest"
}
}
]
@@ -237,16 +357,26 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/mongodb-24-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.4"
+ }
},
{
"name": "2.4",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/mongodb-24-centos7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/mongodb-26-centos7:latest"
}
}
]
@@ -260,16 +390,19 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/jenkins-1-centos7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "1"
+ }
},
{
"name": "1",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "openshift/jenkins-1-centos7:latest"
}
}
]
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
index 8c125f76a..d2a8cfb1d 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
@@ -11,10 +11,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/ruby-20-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.0"
+ }
},
{
"name": "2.0",
@@ -23,11 +26,27 @@
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.0,ruby",
- "version": "2.0"
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "description": "Build and run Ruby 2.2 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest"
}
}
]
@@ -41,10 +60,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/nodejs-010-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "0.10"
+ }
},
{
"name": "0.10",
@@ -53,11 +75,12 @@
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
- "version": "0.10"
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest"
}
}
]
@@ -71,10 +94,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/perl-516-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.16"
+ }
},
{
"name": "5.16",
@@ -83,12 +109,29 @@
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.16,perl",
- "version": "5.16"
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "description": "Build and run Perl 5.20 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest"
}
+
}
]
}
@@ -101,10 +144,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/php-55-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
},
{
"name": "5.5",
@@ -113,11 +159,27 @@
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.5,php",
- "version": "5.5"
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "description": "Build and run PHP 5.6 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest"
}
}
]
@@ -131,10 +193,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/python-33-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "3.3"
+ }
},
{
"name": "3.3",
@@ -143,11 +208,42 @@
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.3,python",
- "version": "3.3"
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
},
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "description": "Build and run Python 2.7 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "description": "Build and run Python 3.4 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest"
}
}
]
@@ -161,16 +257,26 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/mysql-55-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
},
{
"name": "5.5",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest"
}
}
]
@@ -184,16 +290,26 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/postgresql-92-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "9.2"
+ }
},
{
"name": "9.2",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest"
}
}
]
@@ -207,16 +323,26 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/mongodb-24-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.4"
+ }
},
{
"name": "2.4",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest"
}
}
]
@@ -230,16 +356,19 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/jenkins-1-rhel7",
"tags": [
{
- "name": "latest"
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "1"
+ }
},
{
"name": "1",
"from": {
- "Kind": "ImageStreamTag",
- "Name": "latest"
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest"
}
}
]
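The pattern repeated across every stream in this hunk is the same: the spec-level dockerImageRepository is dropped, each versioned tag points straight at a DockerImage reference on registry.access.redhat.com, and latest follows the current version through an ImageStreamTag reference. A minimal sketch of reproducing that layout by hand with the oc client, assuming a client new enough to ship oc tag and using the Jenkins stream shown above (the stream name "jenkins" is an assumption, since the metadata block falls outside this hunk):

    # Point the versioned tag at the external image (DockerImage reference)
    oc tag registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest jenkins:1
    # Have "latest" track the versioned tag inside the stream (ImageStreamTag reference)
    oc tag jenkins:1 jenkins:latest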
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
new file mode 100644
index 000000000..b3b60bf9b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "3.1.0"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
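The template above only defines the deployer pod; it still has to be processed with values for its required parameters (KIBANA_HOSTNAME, PUBLIC_MASTER_URL and ES_CLUSTER_SIZE). A hedged sketch of driving it with a 3.1-era oc client, assuming the 'logging-deployer' service account and secret named in the description already exist; the hostnames are placeholders:

    # Register the template, then instantiate it with the required parameters
    oc create -f logging-deployer.yaml
    # -v is the 3.1-era parameter flag; newer clients use -p
    oc process logging-deployer-template \
        -v KIBANA_HOSTNAME=kibana.example.com,PUBLIC_MASTER_URL=https://master.example.com:8443,ES_CLUSTER_SIZE=1 \
        | oc create -f -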
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true, the deployer will try to delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non-persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
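As with the logging deployer, the description above requires a 'metrics-deployer' service account and secret to exist before the pod runs. A hedged sketch of one way to satisfy that, assuming 3.x-era oc syntax; the empty secret is just a placeholder for deployments that supply no custom certificates:

    # Service account named as the template expects
    echo '{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"metrics-deployer"}}' | oc create -f -
    # Essentially empty secret that satisfies the deployer's requirement
    oc secrets new metrics-deployer nothing=/dev/null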
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
new file mode 100644
index 000000000..4c798e148
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true, the deployer will try to delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non-persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
index deac2010f..da5679444 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
@@ -201,6 +201,10 @@
{
"name": "CAKEPHP_SECURITY_CIPHER_SEED",
"value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
}
]
}
@@ -364,6 +368,11 @@
"description": "Security cipher seed for session hash",
"generate": "expression",
"from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+      "description": "How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
}
]
}
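The new OPCACHE_REVALIDATE_FREQ parameter is wired into an env list in the template, so it can be overridden when the quickstart is instantiated. A hedged sketch, processing the file directly so no assumption is made about the template's metadata name and assuming the remaining parameters keep usable defaults (the -v flag is the 3.1-era spelling; newer clients use -p):

    # Re-check PHP scripts on every request instead of every 2 seconds
    oc process -f cakephp-mysql.json -v OPCACHE_REVALIDATE_FREQ=0 | oc create -f -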
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
index ec556ea13..f426e1dd6 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
@@ -190,6 +190,10 @@
{
"name": "CAKEPHP_SECURITY_CIPHER_SEED",
"value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
}
]
}
@@ -261,6 +265,11 @@
"description": "Security cipher seed for session hash",
"generate": "expression",
"from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+      "description": "How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
index 2cbcc0889..55f655102 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
@@ -175,6 +175,10 @@
{
"name": "SECRET_KEY_BASE",
"value": "${SECRET_KEY_BASE}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
}
]
}
@@ -330,6 +334,11 @@
"value": "openshift/mysql-55-centos7"
},
{
+ "name": "PERL_APACHE2_RELOAD",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules",
+ "value": ""
+ },
+ {
"name": "SECRET_KEY_BASE",
"description": "Your secret key for verifying the integrity of signed cookies",
"generate": "expression",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
index 43271dfc5..3ee19be83 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
@@ -157,6 +157,12 @@
{
"containerPort": 8080
}
+ ],
+ "env": [
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
]
}
]
@@ -195,6 +201,11 @@
"description": "Your secret key for verifying the integrity of signed cookies",
"generate": "expression",
"from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules",
+ "value": ""
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
index 017b5be19..8760b074c 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
@@ -102,6 +102,12 @@
"github": {
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
}
]
}
@@ -298,6 +304,12 @@
"from": "[a-zA-Z0-9]{40}"
},
{
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the Generic webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
"name": "DATABASE_SERVICE_NAME",
"description": "Database service name",
"value": "mongodb"
@@ -328,7 +340,7 @@
{
"name": "MONGODB_IMAGE",
"description": "Image to use for mongodb",
- "value": "openshift/mongodb-24-centos7"
+ "value": "openshift/mongodb-24-centos7"
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
index 55488ab41..e047266e3 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
@@ -102,6 +102,12 @@
"github": {
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
}
]
}
@@ -213,6 +219,12 @@
"from": "[a-zA-Z0-9]{40}"
},
{
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the Generic webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
"name": "DATABASE_SERVICE_NAME",
"description": "Database service name"
},
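Both Node.js quickstarts now expose a Generic webhook trigger next to the GitHub one, keyed by the generated GENERIC_WEBHOOK_SECRET. A hedged sketch of firing it by hand; the master URL, project and build config name are placeholders, and the URL shape assumes the 3.x-era oapi webhook endpoint:

    curl -k -X POST \
      https://master.example.com:8443/oapi/v1/namespaces/myproject/buildconfigs/nodejs-example/webhooks/<GENERIC_WEBHOOK_SECRET>/generic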
diff --git a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json
index 37e6269fe..aaf5569ae 100644
--- a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json
@@ -12,19 +12,21 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-webserver3-tomcat7-openshift"
+ "name": "jboss-webserver30-tomcat7-openshift"
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/tomcat7-openshift",
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
"tags": [
{
- "name": "3.0",
+ "name": "1.1",
"annotations": {
- "description": "JBoss Web Server v3 Tomcat 7 STI images.",
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
"iconClass": "icon-jboss",
- "tags": "java",
- "supports":"tomcat7:3.0,java",
- "version": "3.0"
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
}
}
]
@@ -34,19 +36,21 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-webserver3-tomcat8-openshift"
+ "name": "jboss-webserver30-tomcat8-openshift"
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/tomcat8-openshift",
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
"tags": [
{
- "name": "3.0",
+ "name": "1.1",
"annotations": {
- "description": "JBoss Web Server v3 Tomcat 8 STI images.",
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
"iconClass": "icon-jboss",
- "tags": "java",
- "supports":"tomcat8:3.0,java",
- "version": "3.0"
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
}
}
]
@@ -56,19 +60,22 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-eap6-openshift"
+ "name": "jboss-eap64-openshift"
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap-openshift",
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
"tags": [
{
- "name": "6.4",
+ "name": "1.1",
"annotations": {
- "description": "JBoss EAP 6 STI images.",
+ "description": "JBoss EAP 6.4 S2I images.",
"iconClass": "icon-jboss",
- "tags": "javaee",
- "supports":"eap:6.4,jee,java",
- "version": "6.4"
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.1"
}
}
]
@@ -78,19 +85,19 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jboss-amq-6"
+ "name": "jboss-amq-62"
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq-openshift",
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
"tags": [
{
- "name": "6.2",
+ "name": "1.1",
"annotations": {
- "description": "JBoss ActiveMQ 6 broker image.",
+ "description": "JBoss A-MQ 6.2 broker image.",
"iconClass": "icon-jboss",
- "tags": "javaee",
- "supports":"amq:6.2,jee,java",
- "version": "6.2"
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1"
}
}
]
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json
new file mode 100644
index 000000000..3fd04c28c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json
@@ -0,0 +1,325 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "amq62-basic"
+ },
+ "labels": {
+ "template": "amq62-basic",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+            "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
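One operational note from the amq62-basic parameters above: with AMQ_MESH_DISCOVERY_TYPE left at its default of kube, the pod's service account needs the view role, exactly as the parameter description states. Spelled out as a command with a placeholder project name:

    oc policy add-role-to-user view system:serviceaccount:myproject:default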
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json
index 5cbc7ee7e..aa9e716cf 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json
@@ -3,82 +3,117 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for ActiveMQ brokers using persistent storage."
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "amq6-persistent"
+ "name": "amq62-persistent-ssl"
},
"labels": {
- "template": "amq6-persistent"
+ "template": "amq62-persistent-ssl",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "ActiveMQ Release version, e.g. 6.2, etc.",
- "name": "AMQ_RELEASE",
- "value": "6.2"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "broker"
+ "value": "broker",
+ "required": true
},
{
- "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP",
+        "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automatically.",
"name": "MQ_PROTOCOL",
- "value": "openwire"
+ "value": "openwire",
+ "required": false
},
{
- "description": "Queue names",
+        "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
"name": "MQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
- "description": "Topic names",
+        "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
"name": "MQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
- "description": "Broker user name",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "Broker user password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "ActiveMQ Admin User",
+ "description": "User name for admin user. If left empty, it will be generated.",
"name": "AMQ_ADMIN_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "ActiveMQ Admin Password",
+ "description": "Password for admin user. If left empty, it will be generated.",
"name": "AMQ_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
- "value": "amq-app-secret"
+ "value": "amq-app-secret",
+ "required": true
},
{
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
- "value": "broker.ts"
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
},
{
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
- "value": "broker.ks"
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -102,7 +137,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's amqp port."
+ "description": "The broker's AMQP port."
}
}
},
@@ -126,7 +161,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's amqp ssl port."
+ "description": "The broker's AMQP SSL port."
}
}
},
@@ -150,7 +185,31 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's mqtt port."
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
}
}
},
@@ -174,7 +233,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's stomp port."
+ "description": "The broker's STOMP port."
}
}
},
@@ -198,7 +257,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's stomp ssl port."
+ "description": "The broker's STOMP SSL port."
}
}
},
@@ -222,7 +281,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's tcp (openwire) port."
+ "description": "The broker's OpenWire port."
}
}
},
@@ -246,7 +305,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's tcp ssl (openwire) port."
+ "description": "The broker's OpenWire (SSL) port."
}
}
},
@@ -273,10 +332,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-amq-6:${AMQ_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -293,10 +355,11 @@
},
"spec": {
"serviceAccount": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-amq",
- "image": "jboss-amq-6",
+ "image": "jboss-amq-62",
"imagePullPolicy": "Always",
"volumeMounts": [
{
@@ -335,6 +398,11 @@
"protocol": "TCP"
},
{
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
"name": "stomp",
"containerPort": 61613,
"protocol": "TCP"
@@ -365,7 +433,7 @@
"value": "${MQ_PASSWORD}"
},
{
- "name": "AMQ_PROTOCOLS",
+ "name": "AMQ_TRANSPORTS",
"value": "${MQ_PROTOCOL}"
},
{
@@ -393,8 +461,20 @@
"value": "${AMQ_TRUSTSTORE}"
},
{
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
"name": "AMQ_KEYSTORE",
"value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
}
]
}
@@ -427,7 +507,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json
new file mode 100644
index 000000000..3a2db3ce9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json
@@ -0,0 +1,343 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "amq62-persistent"
+ },
+ "labels": {
+ "template": "amq62-persistent",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+            "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
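Unlike amq62-basic, the persistent variant above also creates a PersistentVolumeClaim sized by VOLUME_CAPACITY and mounts it at /opt/amq/data/kahadb. A quick way to confirm the claim bound after instantiating the template, using the default APPLICATION_NAME of broker:

    oc get pvc broker-amq-claim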
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json
index 7decdfe52..f61fb24c2 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json
@@ -3,77 +3,117 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for ActiveMQ brokers."
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "amq6"
+ "name": "amq62-ssl"
},
"labels": {
- "template": "amq6"
+ "template": "amq62-ssl",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "ActiveMQ Release version, e.g. 6.2, etc.",
- "name": "AMQ_RELEASE",
- "value": "6.2"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "broker"
+ "value": "broker",
+ "required": true
},
{
- "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP",
+        "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automatically.",
"name": "MQ_PROTOCOL",
- "value": "openwire"
+ "value": "openwire",
+ "required": false
},
{
- "description": "Queue names",
+        "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will still be created dynamically.",
"name": "MQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
- "description": "Topic names",
+        "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will still be created dynamically.",
"name": "MQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
- "description": "Broker user name",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "Broker user password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "ActiveMQ Admin User",
+ "description": "User name for admin user. If left empty, it will be generated.",
"name": "AMQ_ADMIN_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "ActiveMQ Admin Password",
+ "description": "Password for admin user. If left empty, it will be generated.",
"name": "AMQ_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
- "value": "amq-app-secret"
+ "value": "amq-app-secret",
+ "required": true
},
{
"description": "SSL trust store filename",
"name": "AMQ_TRUSTSTORE",
- "value": "broker.ts"
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
},
{
"description": "SSL key store filename",
"name": "AMQ_KEYSTORE",
- "value": "broker.ks"
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -97,7 +137,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's amqp port."
+ "description": "The broker's AMQP port."
}
}
},
@@ -121,7 +161,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's amqp ssl port."
+ "description": "The broker's AMQP SSL port."
}
}
},
@@ -145,7 +185,31 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's mqtt port."
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
}
}
},
@@ -169,7 +233,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's stomp port."
+ "description": "The broker's STOMP port."
}
}
},
@@ -193,7 +257,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's stomp ssl port."
+ "description": "The broker's STOMP SSL port."
}
}
},
@@ -217,7 +281,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's tcp (openwire) port."
+ "description": "The broker's OpenWire port."
}
}
},
@@ -241,7 +305,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's tcp ssl (openwire) port."
+ "description": "The broker's OpenWire (SSL) port."
}
}
},
@@ -268,10 +332,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-amq-6:${AMQ_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -288,10 +355,11 @@
},
"spec": {
"serviceAccount": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-amq",
- "image": "jboss-amq-6",
+ "image": "jboss-amq-62",
"imagePullPolicy": "Always",
"volumeMounts": [
{
@@ -326,6 +394,11 @@
"protocol": "TCP"
},
{
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
"name": "stomp",
"containerPort": 61613,
"protocol": "TCP"
@@ -356,7 +429,7 @@
"value": "${MQ_PASSWORD}"
},
{
- "name": "AMQ_PROTOCOLS",
+ "name": "AMQ_TRANSPORTS",
"value": "${MQ_PROTOCOL}"
},
{
@@ -376,10 +449,22 @@
"value": "${AMQ_ADMIN_PASSWORD}"
},
{
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
"name": "AMQ_MESH_SERVICE_NAME",
"value": "${APPLICATION_NAME}-amq-tcp"
},
{
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
"name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
"value": "/etc/amq-secret-volume"
},
@@ -388,8 +473,20 @@
"value": "${AMQ_TRUSTSTORE}"
},
{
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
"name": "AMQ_KEYSTORE",
"value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
}
]
}
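A rough usage sketch for the renamed amq62-ssl template (the parameter values below are illustrative placeholders, not shipped defaults; on 3.1-era oc clients the parameter flag is -v, newer clients use -p):

    # Instantiate the SSL-enabled A-MQ 6.2 template from its file.
    oc process -f amq62-ssl.json \
        -v APPLICATION_NAME=broker \
        -v AMQ_SECRET=amq-app-secret \
        -v AMQ_TRUSTSTORE_PASSWORD=changeit \
        -v AMQ_KEYSTORE_PASSWORD=changeit \
        | oc create -f -

    # Only needed when AMQ_MESH_DISCOVERY_TYPE=kube, as quoted in the parameter description above.
    oc policy add-role-to-user view system:serviceaccount:<namespace>:default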
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json
index b64acae8b..2fc3b5b25 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -3,129 +3,149 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 A-MQ applications with persistent storage built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-amq-persistent-sti"
+ "name": "eap64-amq-persistent-s2i"
},
"labels": {
- "template": "eap6-amq-persistent-sti"
+ "template": "eap64-amq-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
- "description": "ActiveMQ Release version, e.g. 6.2, etc.",
- "name": "AMQ_RELEASE",
- "value": "6.2"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
- "value": "java:/ConnectionFactory"
+ "value": "java:/ConnectionFactory",
+ "required": false
},
{
- "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
- "value": "openwire"
+ "value": "openwire",
+ "required": false
},
{
- "description": "Queue names",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
- "value": ""
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
},
{
- "description": "Topic names",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
- "value": ""
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
- "description": "Broker user name",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "Broker user password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "ActiveMQ Admin User",
+ "description": "User name for broker admin. If left empty, it will be generated.",
"name": "AMQ_ADMIN_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "ActiveMQ Admin Password",
+ "description": "Password for broker admin. If left empty, it will be generated.",
"name": "AMQ_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -149,7 +169,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's HTTP port."
}
}
},
@@ -173,32 +193,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
+ "description": "The web server's HTTPS port."
}
}
},
@@ -222,25 +217,25 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's tcp (openwire) port."
+ "description": "The broker's OpenWire port."
}
}
},
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Route for application's http service."
- }
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -249,23 +244,23 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Route for application's https service."
+ "description": "Route for application's HTTPS service."
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -292,18 +287,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -317,18 +313,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -359,6 +358,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -375,6 +377,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -443,12 +446,16 @@
"value": "${MQ_TOPICS}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -504,10 +511,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-amq-6:${AMQ_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -523,10 +533,11 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-amq",
- "image": "jboss-amq-6",
+ "image": "jboss-amq-62",
"imagePullPolicy": "Always",
"readinessProbe": {
"exec": {
@@ -590,7 +601,7 @@
"value": "${MQ_PASSWORD}"
},
{
- "name": "AMQ_PROTOCOLS",
+ "name": "AMQ_TRANSPORTS",
"value": "${MQ_PROTOCOL}"
},
{
@@ -634,7 +645,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json
index 20b234bd0..a420bb1ea 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json
@@ -3,124 +3,143 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 A-MQ applications built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-amq-sti"
+ "name": "eap64-amq-s2i"
},
"labels": {
- "template": "eap6-amq-sti"
+ "template": "eap64-amq-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
- "description": "ActiveMQ Release version, e.g. 6.2, etc.",
- "name": "AMQ_RELEASE",
- "value": "6.2"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
},
{
"description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
"name": "MQ_JNDI",
- "value": "java:/ConnectionFactory"
+ "value": "java:/ConnectionFactory",
+ "required": false
},
{
- "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
- "value": "openwire"
+ "value": "openwire",
+ "required": false
},
{
- "description": "Queue names",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_QUEUES",
- "value": ""
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
},
{
- "description": "Topic names",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
"name": "MQ_TOPICS",
- "value": ""
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
- "description": "Broker user name",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "Broker user password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
"name": "MQ_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": false
},
{
- "description": "ActiveMQ Admin User",
+ "description": "User name for broker admin. If left empty, it will be generated.",
"name": "AMQ_ADMIN_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "ActiveMQ Admin Password",
+ "description": "Password for broker admin. If left empty, it will be generated.",
"name": "AMQ_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -144,7 +163,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's http port."
+ "description": "The web server's HTTP port."
}
}
},
@@ -168,32 +187,7 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The web server's https port."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
+ "description": "The web server's HTTPS port."
}
}
},
@@ -217,25 +211,25 @@
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "The broker's tcp (openwire) port."
+ "description": "The broker's OpenWire port."
}
}
},
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Route for application's http service."
- }
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -244,23 +238,23 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
"annotations": {
- "description": "Route for application's https service."
+ "description": "Route for application's HTTPS service."
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -287,18 +281,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -312,18 +307,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -354,6 +352,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -370,6 +371,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -438,12 +440,16 @@
"value": "${MQ_TOPICS}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -499,10 +505,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-amq-6:${AMQ_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -518,10 +527,11 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-amq",
- "image": "jboss-amq-6",
+ "image": "jboss-amq-62",
"imagePullPolicy": "Always",
"readinessProbe": {
"exec": {
@@ -579,7 +589,7 @@
"value": "${MQ_PASSWORD}"
},
{
- "name": "AMQ_PROTOCOLS",
+ "name": "AMQ_TRANSPORTS",
"value": "${MQ_PROTOCOL}"
},
{
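Renaming GITHUB_TRIGGER_SECRET/GENERIC_TRIGGER_SECRET to the *_WEBHOOK_SECRET names only changes the parameter names; the generated values still land in the BuildConfig triggers. As a sketch (host, port and project are placeholders), the GitHub webhook URL takes roughly the form shown below, and a build can also be started by hand under the default APPLICATION_NAME of eap-app:

    # GitHub webhook URL for the generated BuildConfig (3.x API form; substitute real values).
    # https://<openshift-master>:8443/oapi/v1/namespaces/<project>/buildconfigs/eap-app/webhooks/<GITHUB_WEBHOOK_SECRET>/github

    # Manual build, independent of the webhook, ImageChange and new ConfigChange triggers.
    oc start-build eap-app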
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json
index 146bfb1ee..3f90eb8be 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json
@@ -3,72 +3,86 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
- "description": "Application template for EAP 6 applications built using STI."
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-basic-sti"
+ "name": "eap64-basic-s2i"
},
"labels": {
- "template": "eap6-basic-sti"
+ "template": "eap64-basic-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI",
- "value": "https://github.com/jboss-developer/jboss-eap-quickstarts"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "6.4.x"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": "kitchensink"
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -97,36 +111,11 @@
}
},
{
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -135,7 +124,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -164,18 +153,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -189,18 +179,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -231,6 +224,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -246,6 +242,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -274,12 +271,16 @@
],
"env": [
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "HORNETQ_CLUSTER_PASSWORD",
@@ -301,4 +302,4 @@
}
}
]
-}
+} \ No newline at end of file
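Dropping the ${APPLICATION_NAME}-ping Service and switching the pod environment from OPENSHIFT_DNS_PING_* to OPENSHIFT_KUBE_PING_* means JGroups now discovers cluster peers through the Kubernetes API rather than DNS. A sketch of the usual follow-up, assuming the pods run under the project's default service account (this template sets no serviceAccount) and that the template is already loaded; parameter values are the defaults visible above:

    # Allow the pods' service account to list peer pods for KUBE_PING.
    oc policy add-role-to-user view system:serviceaccount:<project>:default

    # Instantiate the renamed template; SOURCE_REPOSITORY_* replace the old GIT_* parameters.
    oc new-app eap64-basic-s2i \
        -p SOURCE_REPOSITORY_URL=https://github.com/jboss-developer/jboss-eap-quickstarts \
        -p SOURCE_REPOSITORY_REF=6.4.x \
        -p CONTEXT_DIR=kitchensink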
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json
index 5df36ccc2..220d2f5b9 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json
@@ -3,92 +3,110 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-jboss",
- "description": "Application template for EAP 6 applications built using STI."
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-https-sti"
+ "name": "eap64-https-s2i"
},
"labels": {
- "template": "eap6-https-sti"
+ "template": "eap64-https-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI",
- "value": "https://github.com/jboss-developer/jboss-eap-quickstarts"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "6.4.x"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": "kitchensink"
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": true
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -141,36 +159,11 @@
}
},
{
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -179,7 +172,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -188,9 +181,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -199,12 +192,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -231,18 +224,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -256,18 +250,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -298,6 +295,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -314,6 +314,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -354,12 +355,16 @@
],
"env": [
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -405,4 +410,4 @@
}
}
]
-}
+} \ No newline at end of file
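Besides the parameter renames, the Route objects in eap64-https-s2i drop the -http-route/-https-route suffixes (the secure route becomes secure-${APPLICATION_NAME}) and the clustering environment moves to KUBE_PING. Quick post-deployment checks, assuming the default APPLICATION_NAME of eap-app:

    # Routes are now named after the application itself (eap-app and secure-eap-app).
    oc get routes

    # The DeploymentConfig should carry KUBE_PING variables instead of the removed DNS_PING ones
    # (oc env on 3.1-era clients; oc set env on newer ones).
    oc env dc/eap-app --list | grep OPENSHIFT_KUBE_PING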
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json
index 289ab284f..a1a3a9f2c 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -3,148 +3,179 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 MongDB applications with persistent storage built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 MongDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-mongodb-persistent-sti"
+ "name": "eap64-mongodb-persistent-s2i"
},
"labels": {
- "template": "eap6-mongodb-persistent-sti"
+ "template": "eap64-mongodb-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Disable data file preallocation.",
- "name": "MONGODB_NOPREALLOC"
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
},
{
"description": "Set MongoDB to use a smaller default data file size.",
- "name": "MONGODB_SMALLFILES"
+ "name": "MONGODB_SMALLFILES",
+ "required": false
},
{
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
- "name": "MONGODB_QUIET"
+ "name": "MONGODB_QUIET",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -202,31 +233,6 @@
"spec": {
"ports": [
{
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
"port": 27017,
"targetPort": 27017
}
@@ -248,9 +254,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -259,7 +265,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -268,9 +274,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -279,12 +285,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -311,18 +317,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -336,18 +343,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -378,6 +388,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -394,6 +407,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -470,12 +484,16 @@
"value": "${DB_TX_ISOLATION}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -543,10 +561,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mongodb:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -562,6 +583,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mongodb",
@@ -574,11 +596,11 @@
}
],
"volumeMounts": [
- {
- "mountPath": "/var/lib/mongodb/data",
- "name": "${APPLICATION_NAME}-mongodb-pvol"
- }
- ],
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
"env": [
{
"name": "MONGODB_USER",
@@ -633,7 +655,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
@@ -642,4 +666,4 @@
}
}
]
-}
+} \ No newline at end of file
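The persistent MongoDB variant still provisions a PersistentVolumeClaim sized by VOLUME_CAPACITY (512Mi by default) and mounts it at /var/lib/mongodb/data; only the accessModes layout was reformatted. A quick way to confirm the claim was bound after instantiation (the claim name is whatever the template generates and is not shown in this hunk):

    # The claim should report Bound with the requested VOLUME_CAPACITY.
    oc get pvc

    # Inspect the claim backing /var/lib/mongodb/data, using the name reported above.
    oc describe pvc <claim-name>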
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json
index 22b301aa9..dfd1443ed 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json
@@ -3,143 +3,173 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 MongDB applications built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 MongDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-mongodb-sti"
+ "name": "eap64-mongodb-s2i"
},
"labels": {
- "template": "eap6-mongodb-sti"
+ "template": "eap64-mongodb-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Disable data file preallocation.",
- "name": "MONGODB_NOPREALLOC"
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
},
{
"description": "Set MongoDB to use a smaller default data file size.",
- "name": "MONGODB_SMALLFILES"
+ "name": "MONGODB_SMALLFILES",
+ "required": false
},
{
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
- "name": "MONGODB_QUIET"
+ "name": "MONGODB_QUIET",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -197,31 +227,6 @@
"spec": {
"ports": [
{
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
"port": 27017,
"targetPort": 27017
}
@@ -243,9 +248,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -254,7 +259,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -263,9 +268,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -274,12 +279,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -306,18 +311,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -331,18 +337,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -373,6 +382,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -389,6 +401,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -465,12 +478,16 @@
"value": "${DB_TX_ISOLATION}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -538,10 +555,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mongodb:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -557,6 +577,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mongodb",
@@ -605,4 +626,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json
index 648a53199..fdd368a5f 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -3,150 +3,182 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 MySQL applications with persistent storage built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-mysql-persistent-sti"
+ "name": "eap64-mysql-persistent-s2i"
},
"labels": {
- "template": "eap6-mysql-persistent-sti"
+ "template": "eap64-mysql-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Sets how the table names are stored and compared.",
- "name": "MYSQL_LOWER_CASE_TABLE_NAMES"
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
},
{
"description": "The maximum permitted number of simultaneous client connections.",
- "name": "MYSQL_MAX_CONNECTIONS"
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "The minimum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MIN_WORD_LEN"
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
},
{
"description": "The maximum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MAX_WORD_LEN"
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
},
{
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
- "name": "MYSQL_AIO"
+ "name": "MYSQL_AIO",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -204,31 +236,6 @@
"spec": {
"ports": [
{
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
"port": 3306,
"targetPort": 3306
}
@@ -250,9 +257,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -261,7 +268,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -270,9 +277,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -281,12 +288,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -313,18 +320,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -338,18 +346,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -380,6 +391,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -396,6 +410,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -472,12 +487,16 @@
"value": "${DB_TX_ISOLATION}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -545,10 +564,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mysql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -564,6 +586,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mysql",
@@ -639,7 +662,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
@@ -648,4 +673,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json
index 83d5c8b18..ff6bdc112 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json
@@ -3,145 +3,176 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 MySQL applications built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-mysql-sti"
+ "name": "eap64-mysql-s2i"
},
"labels": {
- "template": "eap6-mysql-sti"
+ "template": "eap64-mysql-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Sets how the table names are stored and compared.",
- "name": "MYSQL_LOWER_CASE_TABLE_NAMES"
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
},
{
"description": "The maximum permitted number of simultaneous client connections.",
- "name": "MYSQL_MAX_CONNECTIONS"
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "The minimum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MIN_WORD_LEN"
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
},
{
"description": "The maximum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MAX_WORD_LEN"
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
},
{
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
- "name": "MYSQL_AIO"
+ "name": "MYSQL_AIO",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -199,31 +230,6 @@
"spec": {
"ports": [
{
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
"port": 3306,
"targetPort": 3306
}
@@ -245,9 +251,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -256,7 +262,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -265,9 +271,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -276,12 +282,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -308,18 +314,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -333,18 +340,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -375,6 +385,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -391,6 +404,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -467,12 +481,16 @@
"value": "${DB_TX_ISOLATION}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -540,10 +558,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mysql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -559,6 +580,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mysql",
@@ -611,4 +633,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 53b953b7e..6443afdb0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -3,138 +3,167 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-postgresql-persistent-sti"
+ "name": "eap64-postgresql-persistent-s2i"
},
"labels": {
- "template": "eap6-postgresql-persistent-sti"
+ "template": "eap64-postgresql-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
- "name": "POSTGRESQL_MAX_CONNECTIONS"
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
- "name": "POSTGRESQL_SHARED_BUFFERS"
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -192,31 +221,6 @@
"spec": {
"ports": [
{
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
"port": 5432,
"targetPort": 5432
}
@@ -238,9 +242,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -249,7 +253,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -258,9 +262,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -269,12 +273,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -301,18 +305,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -326,18 +331,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -368,6 +376,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -384,6 +395,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -460,12 +472,16 @@
"value": "${DB_TX_ISOLATION}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -533,10 +549,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "postgresql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -552,6 +571,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-postgresql",
@@ -564,10 +584,10 @@
}
],
"volumeMounts": [
- {
- "mountPath": "/var/lib/pgsql/data",
- "name": "${APPLICATION_NAME}-postgresql-pvol"
- }
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
],
"env": [
{
@@ -615,7 +635,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
@@ -624,4 +646,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json
index 9d660cb42..e879e51cf 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json
@@ -3,133 +3,161 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for EAP 6 PostgreSQL applications built using STI.",
- "iconClass" : "icon-jboss"
+ "description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "eap6-postgresql-sti"
+ "name": "eap64-postgresql-s2i"
},
"labels": {
- "template": "eap6-postgresql-sti"
+ "template": "eap64-postgresql-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "EAP Release version, e.g. 6.4, etc.",
- "name": "EAP_RELEASE",
- "value": "6.4"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "eap-app"
+ "value": "eap-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Queue names",
"name": "HORNETQ_QUEUES",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Topic names",
"name": "HORNETQ_TOPICS",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The name of the secret containing the keystore file",
"name": "EAP_HTTPS_SECRET",
- "value": "eap-app-secret"
+ "value": "eap-app-secret",
+ "required": false
},
{
"description": "The name of the keystore file within the secret",
"name": "EAP_HTTPS_KEYSTORE",
- "value": "keystore.jks"
+ "value": "keystore.jks",
+ "required": false
},
{
"description": "The name associated with the server certificate",
"name": "EAP_HTTPS_NAME",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "The password for the keystore and certificate",
"name": "EAP_HTTPS_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
- "name": "POSTGRESQL_MAX_CONNECTIONS"
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
- "name": "POSTGRESQL_SHARED_BUFFERS"
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
},
{
"description": "HornetQ cluster admin password",
"name": "HORNETQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -187,31 +215,6 @@
"spec": {
"ports": [
{
- "port": 8888,
- "targetPort": 8888
- }
- ],
- "portalIP": "None",
- "selector": {
- "deploymentConfig": "${APPLICATION_NAME}"
- }
- },
- "metadata": {
- "name": "${APPLICATION_NAME}-ping",
- "labels": {
- "application": "${APPLICATION_NAME}"
- },
- "annotations": {
- "description": "Ping service for clustered applications."
- }
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "spec": {
- "ports": [
- {
"port": 5432,
"targetPort": 5432
}
@@ -233,9 +236,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -244,7 +247,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -253,9 +256,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -264,12 +267,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -296,18 +299,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-eap6-openshift:${EAP_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
}
}
},
@@ -321,18 +325,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -363,6 +370,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -379,6 +389,7 @@
},
"spec": {
"serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -455,12 +466,16 @@
"value": "${DB_TX_ISOLATION}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
- "value": "${APPLICATION_NAME}-ping"
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
},
{
- "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
- "value": "8888"
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
},
{
"name": "EAP_HTTPS_KEYSTORE_DIR",
@@ -528,10 +543,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "postgresql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -547,6 +565,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-postgresql",
@@ -587,4 +606,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 3c7812b69..729079130 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -3,67 +3,81 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-basic-sti"
+ "name": "jws30-tomcat7-basic-s2i"
},
"labels": {
- "template": "jws-tomcat8-basic-sti"
+ "template": "jws30-tomcat7-basic-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -94,9 +108,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -105,7 +119,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -134,18 +148,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -159,18 +174,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -201,6 +219,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -216,6 +237,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json
index d725e0606..7ce7e7fe2 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -3,87 +3,105 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-basic-sti"
+ "name": "jws30-tomcat7-https-s2i"
},
"labels": {
- "template": "jws-tomcat8-basic-sti"
+ "template": "jws30-tomcat7-https-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -138,9 +156,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -149,7 +167,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -158,9 +176,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -169,12 +187,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -201,18 +219,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -226,18 +245,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -268,6 +290,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -284,6 +309,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 0c7b7d8e3..9a08ec0b0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -3,144 +3,174 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MongoDB applications with persistent storage built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-mongodb-persistent-sti"
+ "name": "jws30-tomcat7-mongodb-persistent-s2i"
},
"labels": {
- "template": "jws-tomcat7-mongodb-persistent-sti"
+ "template": "jws30-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Disable data file preallocation.",
- "name": "MONGODB_NOPREALLOC"
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
},
{
"description": "Set MongoDB to use a smaller default data file size.",
- "name": "MONGODB_SMALLFILES"
+ "name": "MONGODB_SMALLFILES",
+ "required": false
},
{
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
- "name": "MONGODB_QUIET"
+ "name": "MONGODB_QUIET",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -219,9 +249,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -230,7 +260,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -239,9 +269,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -250,12 +280,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -282,18 +312,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -307,18 +338,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -349,6 +383,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -365,6 +402,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -497,10 +535,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mongodb:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -516,6 +557,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mongodb",
@@ -528,11 +570,11 @@
}
],
"volumeMounts": [
- {
- "mountPath": "/var/lib/mongodb/data",
- "name": "${APPLICATION_NAME}-mongodb-pvol"
- }
- ],
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
"env": [
{
"name": "MONGODB_USER",
@@ -587,7 +629,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
@@ -596,4 +640,4 @@
}
}
]
-}
+}
\ No newline at end of file
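For reference, after this patch every generated parameter in the jws30 templates carries an explicit "required" flag alongside its generator, so a single entry ends up shaped like the following sketch (assembled from the added lines above, shown only as an illustration and not itself part of the patch):

    {
        "description": "GitHub trigger secret",
        "name": "GITHUB_WEBHOOK_SECRET",
        "from": "[a-zA-Z0-9]{8}",
        "generate": "expression",
        "required": true
    }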
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index cf35d0024..b8dfb3ad3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -3,144 +3,168 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MongoDB applications with persistent storage built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-mongodb-persistent-sti"
+ "name": "jws30-tomcat7-mongodb-s2i"
},
"labels": {
- "template": "jws-tomcat8-mongodb-persistent-sti"
+ "template": "jws30-tomcat7-mongodb-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
- },
- {
- "description": "Size of persistent storage for database volume.",
- "name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "root",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Disable data file preallocation.",
- "name": "MONGODB_NOPREALLOC"
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
},
{
"description": "Set MongoDB to use a smaller default data file size.",
- "name": "MONGODB_SMALLFILES"
+ "name": "MONGODB_SMALLFILES",
+ "required": false
},
{
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
- "name": "MONGODB_QUIET"
+ "name": "MONGODB_QUIET",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -219,9 +243,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -230,7 +254,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -239,9 +263,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -250,12 +274,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -282,18 +306,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -307,18 +332,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -349,6 +377,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -365,6 +396,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -497,10 +529,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mongodb:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -516,6 +551,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mongodb",
@@ -527,12 +563,6 @@
"protocol": "TCP"
}
],
- "volumeMounts": [
- {
- "mountPath": "/var/lib/mongodb/data",
- "name": "${APPLICATION_NAME}-mongodb-pvol"
- }
- ],
"env": [
{
"name": "MONGODB_USER",
@@ -564,36 +594,10 @@
}
]
}
- ],
- "volumes": [
- {
- "name": "${APPLICATION_NAME}-mongodb-pvol",
- "persistentVolumeClaim": {
- "claimName": "${APPLICATION_NAME}-mongodb-claim"
- }
- }
]
}
}
}
- },
- {
- "apiVersion": "v1",
- "kind": "PersistentVolumeClaim",
- "metadata": {
- "name": "${APPLICATION_NAME}-mongodb-claim",
- "labels": {
- "application": "${APPLICATION_NAME}"
- }
- },
- "spec": {
- "accessModes": [ "ReadWriteOnce" ],
- "resources": {
- "requests": {
- "storage": "${VOLUME_CAPACITY}"
- }
- }
- }
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index 547449010..d36e330d3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -3,146 +3,177 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MySQL applications with persistent storage built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-mysql-persistent-sti"
+ "name": "jws30-tomcat7-mysql-persistent-s2i"
},
"labels": {
- "template": "jws-tomcat7-mysql-persistent-sti"
+ "template": "jws30-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Sets how the table names are stored and compared.",
- "name": "MYSQL_LOWER_CASE_TABLE_NAMES"
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
},
{
"description": "The maximum permitted number of simultaneous client connections.",
- "name": "MYSQL_MAX_CONNECTIONS"
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "The minimum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MIN_WORD_LEN"
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
},
{
"description": "The maximum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MAX_WORD_LEN"
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
},
{
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
- "name": "MYSQL_AIO"
+ "name": "MYSQL_AIO",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -221,9 +252,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -232,7 +263,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -241,9 +272,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -252,12 +283,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -284,18 +315,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -309,18 +341,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -351,6 +386,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -367,6 +405,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -495,10 +534,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mysql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -514,6 +556,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mysql",
@@ -525,11 +568,11 @@
}
],
"volumeMounts": [
- {
- "mountPath": "/var/lib/mysql/data",
- "name": "${APPLICATION_NAME}-mysql-pvol"
- }
- ],
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
"env": [
{
"name": "MYSQL_USER",
@@ -588,7 +631,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
@@ -597,4 +642,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index 0692817bf..f5309db60 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -3,146 +3,171 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MySQL applications with persistent storage built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-mysql-persistent-sti"
+ "name": "jws30-tomcat7-mysql-s2i"
},
"labels": {
- "template": "jws-tomcat8-mysql-persistent-sti"
+ "template": "jws30-tomcat7-mysql-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
- },
- {
- "description": "Size of persistent storage for database volume.",
- "name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "root",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Sets how the table names are stored and compared.",
- "name": "MYSQL_LOWER_CASE_TABLE_NAMES"
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
},
{
"description": "The maximum permitted number of simultaneous client connections.",
- "name": "MYSQL_MAX_CONNECTIONS"
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "The minimum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MIN_WORD_LEN"
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
},
{
"description": "The maximum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MAX_WORD_LEN"
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
},
{
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
- "name": "MYSQL_AIO"
+ "name": "MYSQL_AIO",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -221,9 +246,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -232,7 +257,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -241,9 +266,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -252,12 +277,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -284,18 +309,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -309,18 +335,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -351,6 +380,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -367,6 +399,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -495,10 +528,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mysql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -514,6 +550,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mysql",
@@ -524,12 +561,6 @@
"protocol": "TCP"
}
],
- "volumeMounts": [
- {
- "mountPath": "/var/lib/mysql/data",
- "name": "${APPLICATION_NAME}-mysql-pvol"
- }
- ],
"env": [
{
"name": "MYSQL_USER",
@@ -565,36 +596,10 @@
}
]
}
- ],
- "volumes": [
- {
- "name": "${APPLICATION_NAME}-mysql-pvol",
- "persistentVolumeClaim": {
- "claimName": "${APPLICATION_NAME}-mysql-claim"
- }
- }
]
}
}
}
- },
- {
- "apiVersion": "v1",
- "kind": "PersistentVolumeClaim",
- "metadata": {
- "name": "${APPLICATION_NAME}-mysql-claim",
- "labels": {
- "application": "${APPLICATION_NAME}"
- }
- },
- "spec": {
- "accessModes": [ "ReadWriteOnce" ],
- "resources": {
- "requests": {
- "storage": "${VOLUME_CAPACITY}"
- }
- }
- }
}
]
-}
+}
\ No newline at end of file
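The build configuration in each converted template now resolves its builder image through the new IMAGE_STREAM_NAMESPACE parameter and pins the image tag, so the sourceStrategy block reads roughly as follows (an illustrative reconstruction of the added lines, not a hunk from this diff):

    "strategy": {
        "type": "Source",
        "sourceStrategy": {
            "forcePull": true,
            "from": {
                "kind": "ImageStreamTag",
                "namespace": "${IMAGE_STREAM_NAMESPACE}",
                "name": "jboss-webserver30-tomcat7-openshift:1.1"
            }
        }
    }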
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index b871b48d0..ee88a4c69 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -3,134 +3,162 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS PostgreSQL applications with persistent storage built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-postgresql-persistent-sti"
+ "name": "jws30-tomcat7-postgresql-persistent-s2i"
},
"labels": {
- "template": "jws-tomcat7-postgresql-persistent-sti"
+ "template": "jws30-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "Size of persistent storage for database volume.",
"name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
- "name": "POSTGRESQL_MAX_CONNECTIONS"
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
- "name": "POSTGRESQL_SHARED_BUFFERS"
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -209,9 +237,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -220,7 +248,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -229,9 +257,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -240,12 +268,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -272,18 +300,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -297,18 +326,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -339,6 +371,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -355,6 +390,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -483,10 +519,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "postgresql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -502,6 +541,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-postgresql",
@@ -513,11 +553,11 @@
}
],
"volumeMounts": [
- {
- "mountPath": "/var/lib/pgsql/data",
- "name": "${APPLICATION_NAME}-postgresql-pvol"
- }
- ],
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
"env": [
{
"name": "POSTGRESQL_USER",
@@ -564,7 +604,9 @@
}
},
"spec": {
- "accessModes": [ "ReadWriteOnce" ],
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
"resources": {
"requests": {
"storage": "${VOLUME_CAPACITY}"
@@ -573,4 +615,4 @@
}
}
]
-}
+}
\ No newline at end of file
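With the renamed webhook secrets and the added ConfigChange entry, the resulting BuildConfig trigger list looks approximately like the sketch below (again an illustrative reconstruction from the added lines, not part of the patch):

    "triggers": [
        { "type": "GitHub", "github": { "secret": "${GITHUB_WEBHOOK_SECRET}" } },
        { "type": "Generic", "generic": { "secret": "${GENERIC_WEBHOOK_SECRET}" } },
        { "type": "ImageChange", "imageChange": {} },
        { "type": "ConfigChange" }
    ]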
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index b46f23225..f5940a7a1 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -3,129 +3,156 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS PostgreSQL applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-postgresql-sti"
+ "name": "jws30-tomcat7-postgresql-s2i"
},
"labels": {
- "template": "jws-tomcat8-postgresql-sti"
+ "template": "jws30-tomcat7-postgresql-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
- "name": "POSTGRESQL_MAX_CONNECTIONS"
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
- "name": "POSTGRESQL_SHARED_BUFFERS"
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -204,9 +231,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -215,7 +242,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -224,9 +251,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -235,12 +262,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -267,18 +294,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
}
}
},
@@ -292,18 +320,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -334,6 +365,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -350,6 +384,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -478,10 +513,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "postgresql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -497,6 +535,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-postgresql",
@@ -536,4 +575,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json
index d74c2dfe3..b24ce40ae 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -3,67 +3,81 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-basic-sti"
+ "name": "jws30-tomcat8-basic-s2i"
},
"labels": {
- "template": "jws-tomcat7-basic-sti"
+ "template": "jws30-tomcat8-basic-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -94,9 +108,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -105,7 +119,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -134,18 +148,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -159,18 +174,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -201,6 +219,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -216,6 +237,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json
index b94142135..7e788d0db 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -3,87 +3,105 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-basic-sti"
+ "name": "jws30-tomcat8-https-s2i"
},
"labels": {
- "template": "jws-tomcat7-basic-sti"
+ "template": "jws30-tomcat8-https-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -138,9 +156,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -149,7 +167,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -158,9 +176,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -169,12 +187,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -201,18 +219,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -226,18 +245,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -268,6 +290,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -284,6 +309,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 892f27fe3..2f1d69c75 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -3,139 +3,174 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MongoDB applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-mongodb-sti"
+ "name": "jws30-tomcat8-mongodb-persistent-s2i"
},
"labels": {
- "template": "jws-tomcat7-mongodb-sti"
+ "template": "jws30-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Disable data file preallocation.",
- "name": "MONGODB_NOPREALLOC"
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
},
{
"description": "Set MongoDB to use a smaller default data file size.",
- "name": "MONGODB_SMALLFILES"
+ "name": "MONGODB_SMALLFILES",
+ "required": false
},
{
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
- "name": "MONGODB_QUIET"
+ "name": "MONGODB_QUIET",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -214,9 +249,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -225,7 +260,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -234,9 +269,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -245,12 +280,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -277,18 +312,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -302,18 +338,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -344,6 +383,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -360,6 +402,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -492,10 +535,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mongodb:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -511,6 +557,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mongodb",
@@ -522,6 +569,12 @@
"protocol": "TCP"
}
],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
"env": [
{
"name": "MONGODB_USER",
@@ -553,10 +606,38 @@
}
]
}
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
]
}
}
}
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index a993024f4..bad676f2e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -3,139 +3,168 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MongoDB applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-mongodb-sti"
+ "name": "jws30-tomcat8-mongodb-s2i"
},
"labels": {
- "template": "jws-tomcat8-mongodb-sti"
+ "template": "jws30-tomcat8-mongodb-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Disable data file preallocation.",
- "name": "MONGODB_NOPREALLOC"
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
},
{
"description": "Set MongoDB to use a smaller default data file size.",
- "name": "MONGODB_SMALLFILES"
+ "name": "MONGODB_SMALLFILES",
+ "required": false
},
{
"description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
- "name": "MONGODB_QUIET"
+ "name": "MONGODB_QUIET",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database admin password",
"name": "DB_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -214,9 +243,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -225,7 +254,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -234,9 +263,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -245,12 +274,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -277,18 +306,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -302,18 +332,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -344,6 +377,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -360,6 +396,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -492,10 +529,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mongodb:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -511,6 +551,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mongodb",
@@ -559,4 +600,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index 2ae59ec71..e20a45982 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -3,141 +3,177 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MySQL applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-mysql-sti"
+ "name": "jws30-tomcat8-mysql-persistent-s2i"
},
"labels": {
- "template": "jws-tomcat7-mysql-sti"
+ "template": "jws30-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Sets how the table names are stored and compared.",
- "name": "MYSQL_LOWER_CASE_TABLE_NAMES"
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
},
{
"description": "The maximum permitted number of simultaneous client connections.",
- "name": "MYSQL_MAX_CONNECTIONS"
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "The minimum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MIN_WORD_LEN"
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
},
{
"description": "The maximum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MAX_WORD_LEN"
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
},
{
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
- "name": "MYSQL_AIO"
+ "name": "MYSQL_AIO",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -216,9 +252,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -227,7 +263,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -236,9 +272,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -247,12 +283,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -279,18 +315,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -304,18 +341,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -346,6 +386,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -362,6 +405,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -490,10 +534,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mysql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -509,6 +556,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mysql",
@@ -519,6 +567,12 @@
"protocol": "TCP"
}
],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
"env": [
{
"name": "MYSQL_USER",
@@ -554,10 +608,38 @@
}
]
}
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
]
}
}
}
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index 226a983b7..1b9624756 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -3,141 +3,171 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS MySQL applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-mysql-sti"
+ "name": "jws30-tomcat8-mysql-s2i"
},
"labels": {
- "template": "jws-tomcat8-mysql-sti"
+ "template": "jws30-tomcat8-mysql-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "Sets how the table names are stored and compared.",
- "name": "MYSQL_LOWER_CASE_TABLE_NAMES"
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
},
{
"description": "The maximum permitted number of simultaneous client connections.",
- "name": "MYSQL_MAX_CONNECTIONS"
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "The minimum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MIN_WORD_LEN"
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
},
{
"description": "The maximum length of the word to be included in a FULLTEXT index.",
- "name": "MYSQL_FT_MAX_WORD_LEN"
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
},
{
"description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
- "name": "MYSQL_AIO"
+ "name": "MYSQL_AIO",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -216,9 +246,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -227,7 +257,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -236,9 +266,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -247,12 +277,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -279,18 +309,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -304,18 +335,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -346,6 +380,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -362,6 +399,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -490,10 +528,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "mysql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -509,6 +550,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-mysql",
@@ -560,4 +602,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index 384ff1b8f..dc492a38e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -3,129 +3,162 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS PostgreSQL applications built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat7-postgresql-sti"
+ "name": "jws30-tomcat8-postgresql-persistent-s2i"
},
"labels": {
- "template": "jws-tomcat7-postgresql-sti"
+ "template": "jws30-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
- "name": "POSTGRESQL_MAX_CONNECTIONS"
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
- "name": "POSTGRESQL_SHARED_BUFFERS"
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -204,9 +237,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -215,7 +248,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -224,9 +257,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -235,12 +268,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -267,18 +300,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -292,18 +326,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -334,6 +371,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -350,6 +390,7 @@
},
"spec": {
"serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}",
@@ -478,10 +519,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "postgresql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -497,6 +541,7 @@
}
},
"spec": {
+ "terminationGracePeriodSeconds": 60,
"containers": [
{
"name": "${APPLICATION_NAME}-postgresql",
@@ -507,6 +552,12 @@
"protocol": "TCP"
}
],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
"env": [
{
"name": "POSTGRESQL_USER",
@@ -530,10 +581,38 @@
}
]
}
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
]
}
}
}
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index b4644ac08..242b37a79 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -3,134 +3,156 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "iconClass" : "icon-tomcat",
- "description": "Application template for JWS PostgreSQL applications with persistent storage built using STI."
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
},
- "name": "jws-tomcat8-postgresql-persistent-sti"
+ "name": "jws30-tomcat8-postgresql-s2i"
},
"labels": {
- "template": "jws-tomcat8-postgresql-persistent-sti"
+ "template": "jws30-tomcat8-postgresql-s2i",
+ "xpaas": "1.1.0"
},
"parameters": [
{
- "description": "JWS Release version, e.g. 3.0, 2.1, etc.",
- "name": "JWS_RELEASE",
- "value": "3.0"
- },
- {
"description": "The name for the application.",
"name": "APPLICATION_NAME",
- "value": "jws-app"
+ "value": "jws-app",
+ "required": true
},
{
"description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
- "name": "APPLICATION_HOSTNAME",
- "value": ""
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
},
{
"description": "Git source URI for application",
- "name": "GIT_URI"
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
},
{
"description": "Git branch/tag reference",
- "name": "GIT_REF",
- "value": "master"
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
},
{
"description": "Path within Git project to build; empty for root project directory.",
- "name": "GIT_CONTEXT_DIR",
- "value": ""
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
},
{
"description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
"name": "DB_JNDI",
- "value": ""
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
},
{
"description": "Database name",
"name": "DB_DATABASE",
- "value": "root"
- },
- {
- "description": "Size of persistent storage for database volume.",
- "name": "VOLUME_CAPACITY",
- "value": "512Mi"
+ "value": "root",
+ "required": true
},
{
"description": "The name of the secret containing the certificate files",
"name": "JWS_HTTPS_SECRET",
- "value": "jws-app-secret"
+ "value": "jws-app-secret",
+ "required": true
},
{
"description": "The name of the certificate file within the secret",
"name": "JWS_HTTPS_CERTIFICATE",
- "value": "server.crt"
+ "value": "server.crt",
+ "required": false
},
{
"description": "The name of the certificate key file within the secret",
"name": "JWS_HTTPS_CERTIFICATE_KEY",
- "value": "server.key"
+ "value": "server.key",
+ "required": false
},
{
"description": "The certificate password",
"name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
- "value": ""
+ "value": "",
+ "required": false
},
{
"description": "Sets xa-pool/min-pool-size for the configured datasource.",
- "name": "DB_MIN_POOL_SIZE"
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
},
{
"description": "Sets xa-pool/max-pool-size for the configured datasource.",
- "name": "DB_MAX_POOL_SIZE"
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
},
{
"description": "Sets transaction-isolation for the configured datasource.",
- "name": "DB_TX_ISOLATION"
+ "name": "DB_TX_ISOLATION",
+ "required": false
},
{
"description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
- "name": "POSTGRESQL_MAX_CONNECTIONS"
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
},
{
"description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
- "name": "POSTGRESQL_SHARED_BUFFERS"
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
},
{
"description": "Database user name",
"name": "DB_USERNAME",
"from": "user[a-zA-Z0-9]{3}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Database user password",
"name": "DB_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin User",
"name": "JWS_ADMIN_USERNAME",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "JWS Admin Password",
"name": "JWS_ADMIN_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
- "description": "Github trigger secret",
- "name": "GITHUB_TRIGGER_SECRET",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
},
{
"description": "Generic build trigger secret",
- "name": "GENERIC_TRIGGER_SECRET",
+ "name": "GENERIC_WEBHOOK_SECRET",
"from": "[a-zA-Z0-9]{8}",
- "generate": "expression"
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
}
],
"objects": [
@@ -209,9 +231,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-http-route",
+ "id": "${APPLICATION_NAME}-http",
"metadata": {
- "name": "${APPLICATION_NAME}-http-route",
+ "name": "${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -220,7 +242,7 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "${APPLICATION_NAME}"
}
@@ -229,9 +251,9 @@
{
"kind": "Route",
"apiVersion": "v1",
- "id": "${APPLICATION_NAME}-https-route",
+ "id": "${APPLICATION_NAME}-https",
"metadata": {
- "name": "${APPLICATION_NAME}-https-route",
+ "name": "secure-${APPLICATION_NAME}",
"labels": {
"application": "${APPLICATION_NAME}"
},
@@ -240,12 +262,12 @@
}
},
"spec": {
- "host": "${APPLICATION_HOSTNAME}",
+ "host": "${APPLICATION_DOMAIN}",
"to": {
"name": "secure-${APPLICATION_NAME}"
},
"tls": {
- "termination" : "passthrough"
+ "termination": "passthrough"
}
}
},
@@ -272,18 +294,19 @@
"source": {
"type": "Git",
"git": {
- "uri": "${GIT_URI}",
- "ref": "${GIT_REF}"
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
},
- "contextDir":"${GIT_CONTEXT_DIR}"
+ "contextDir": "${CONTEXT_DIR}"
},
"strategy": {
"type": "Source",
"sourceStrategy": {
+ "forcePull": true,
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
- "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}"
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
}
}
},
@@ -297,18 +320,21 @@
{
"type": "GitHub",
"github": {
- "secret": "${GITHUB_TRIGGER_SECRET}"
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
}
},
{
"type": "Generic",
"generic": {
- "secret": "${GENERIC_TRIGGER_SECRET}"
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
}
},
{
"type": "ImageChange",
"imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
}
]
}
@@ -339,6 +365,9 @@
"name": "${APPLICATION_NAME}"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -483,10 +512,13 @@
],
"from": {
"kind": "ImageStreamTag",
- "namespace": "openshift",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
"name": "postgresql:latest"
}
}
+ },
+ {
+ "type": "ConfigChange"
}
],
"replicas": 1,
@@ -512,12 +544,6 @@
"protocol": "TCP"
}
],
- "volumeMounts": [
- {
- "mountPath": "/var/lib/pgsql/data",
- "name": "${APPLICATION_NAME}-postgresql-pvol"
- }
- ],
"env": [
{
"name": "POSTGRESQL_USER",
@@ -541,36 +567,10 @@
}
]
}
- ],
- "volumes": [
- {
- "name": "${APPLICATION_NAME}-postgresql-pvol",
- "persistentVolumeClaim": {
- "claimName": "${APPLICATION_NAME}-postgresql-claim"
- }
- }
]
}
}
}
- },
- {
- "apiVersion": "v1",
- "kind": "PersistentVolumeClaim",
- "metadata": {
- "name": "${APPLICATION_NAME}-postgresql-claim",
- "labels": {
- "application": "${APPLICATION_NAME}"
- }
- },
- "spec": {
- "accessModes": [ "ReadWriteOnce" ],
- "resources": {
- "requests": {
- "storage": "${VOLUME_CAPACITY}"
- }
- }
- }
}
]
-}
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 40b7a5d6e..0b4784bae 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -37,6 +37,72 @@
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
changed_when: false
+- name: Import origin infrastructure-templates
+ command: >
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_origin_base }}
+ when: openshift_examples_load_centos | bool
+ register: oex_import_infrastructure
+ failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0"
+ changed_when: false
+
+- name: Import enterprise infrastructure-templates
+ command: >
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_enterprise_base }}
+ when: openshift_examples_load_rhel | bool
+ register: oex_import_infrastructure
+ failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0"
+ changed_when: false
+
+# The 1.1 release of the xpaas content for OpenShift renamed all the templates
+- name: Remove old xpaas templates from filesystem
+ file:
+ path: "{{ xpaas_templates_base }}/{{ item }}"
+ state: absent
+ with_items:
+ - amq6-persistent.json
+ - amq6.json
+ - eap6-amq-persistent-sti.json
+ - eap6-amq-sti.json
+ - eap6-basic-sti.json
+ - eap6-https-sti.json
+ - eap6-mongodb-persistent-sti.json
+ - eap6-mongodb-sti.json
+ - eap6-mysql-persistent-sti.json
+ - eap6-mysql-sti.json
+ - eap6-postgresql-persistent-sti.json
+ - eap6-postgresql-sti.json
+ - jws-tomcat7-basic-sti.json
+ - jws-tomcat7-https-sti.json
+ - jws-tomcat7-mongodb-sti.json
+ - jws-tomcat7-mongodb-persistent-sti.json
+ - jws-tomcat7-mysql-persistent-sti.json
+ - jws-tomcat7-mysql-sti.json
+ - jws-tomcat7-postgresql-persistent-sti.json
+ - jws-tomcat8-postgresql-persistent-sti.json
+ - jws-tomcat8-basic-sti.json
+ - jws-tomcat8-https-sti.json
+ - jws-tomcat8-mongodb-sti.json
+ - jws-tomcat8-mongodb-persistent-sti.json
+ - jws-tomcat8-mysql-sti.json
+ - jws-tomcat8-mysql-persistent-sti.json
+ - jws-tomcat8-postgresql-sti.json
+ - jws-tomcat7-postgresql-sti.json
+
+- name: Remove old xpaas templates from openshift namespace
+ command: >
+ {{ openshift.common.client_binary }} -n openshift delete
+ templates/amq6 templates/amq6-persistent templates/eap6-amq-persistent-sti templates/eap6-amq-sti
+ templates/eap6-basic-sti templates/eap6-basic-sti templates/eap6-mongodb-persistent-sti templates/eap6-mongodb-sti
+ templates/eap6-mysql-persistent-sti templates/eap6-mysql-sti templates/eap6-postgresql-persistent-sti
+ templates/eap6-postgresql-sti templates/jws-tomcat7-basic-sti templates/jws-tomcat7-basic-sti
+ templates/jws-tomcat7-mongodb-persistent-sti templates/jws-tomcat7-mongodb-sti
+ templates/jws-tomcat7-mysql-persistent-sti templates/jws-tomcat7-mysql-sti
+ templates/jws-tomcat7-postgresql-persistent-sti templates/jws-tomcat7-postgresql-sti
+ templates/jws-tomcat8-basic-sti templates/jws-tomcat8-basic-sti templates/jws-tomcat8-mongodb-persistent-sti
+ when: openshift_examples_load_xpaas | bool
+ register: oex_delete_old_xpaas_templates
+ failed_when: "'not found' not in oex_delete_old_xpaas_templates.stderr and oex_delete_old_xpaas_templates.rc != 0"
+ changed_when: false
- name: Import xPaas image streams
command: >
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 3570de693..ae530eadd 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -20,8 +20,29 @@ EXAMPLES = '''
import ConfigParser
import copy
import os
+import StringIO
+import yaml
from distutils.util import strtobool
+from distutils.version import LooseVersion
+import struct
+import socket
+def first_ip(network):
+ """ Return the first IPv4 address in network
+
+ Args:
+ network (str): network in CIDR format
+ Returns:
+ str: first IPv4 address
+ """
+ def atoi(addr):
+ return struct.unpack("!I", socket.inet_aton(addr))[0]
+ def itoa(addr):
+ return socket.inet_ntoa(struct.pack("!I", addr))
+
+ (address, netmask) = network.split('/')
+ netmask_i = (0xffffffff << (32 - int(netmask))) & 0xffffffff
+ return itoa((atoi(address) & netmask_i) + 1)
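# Illustrative, standalone sketch of the CIDR arithmetic above, assuming the
# default portal_net of 172.30.0.0/16 that appears elsewhere in this diff:
import socket
import struct

example_address, example_prefix = '172.30.0.0/16'.split('/')
example_address_i = struct.unpack("!I", socket.inet_aton(example_address))[0]
example_netmask_i = (0xffffffff << (32 - int(example_prefix))) & 0xffffffff
# (0xAC1E0000 & 0xFFFF0000) + 1 == 0xAC1E0001 -> '172.30.0.1', the first
# service IP, which set_aggregate_facts() below adds to the certificate names.
print(socket.inet_ntoa(struct.pack("!I", (example_address_i & example_netmask_i) + 1)))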
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -305,6 +326,23 @@ def set_fluentd_facts_if_unset(facts):
facts['common']['use_fluentd'] = use_fluentd
return facts
+def set_flannel_facts_if_unset(facts):
+ """ Set flannel facts if not already present in facts dict
+ dict: the facts dict updated with the flannel facts if
+ missing
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the flannel
+ facts if they were not already present
+
+ """
+ if 'common' in facts:
+ if 'use_flannel' not in facts['common']:
+ use_flannel = False
+ facts['common']['use_flannel'] = use_flannel
+ return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -362,6 +400,33 @@ def set_metrics_facts_if_unset(facts):
facts['common']['use_cluster_metrics'] = use_cluster_metrics
return facts
+def set_project_cfg_facts_if_unset(facts):
+ """ Set Project Configuration facts if not already present in facts dict
+ dict:
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated Project Configuration
+ facts if they were not already present
+
+ """
+
+ config = {
+ 'default_node_selector': '',
+ 'project_request_message': '',
+ 'project_request_template': '',
+ 'mcs_allocator_range': 's0:/2',
+ 'mcs_labels_per_project': 5,
+ 'uid_allocator_range': '1000000000-1999999999/10000'
+ }
+
+ if 'master' in facts:
+ for key, value in config.items():
+ if key not in facts['master']:
+ facts['master'][key] = value
+
+ return facts
+
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -378,7 +443,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type == 'enterprise':
+ if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -455,24 +520,84 @@ def set_aggregate_facts(facts):
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
+ internal_hostnames = set()
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
+ all_hostnames.add(facts['common']['ip'])
+ all_hostnames.add(facts['common']['public_ip'])
+
+ internal_hostnames.add(facts['common']['hostname'])
+ internal_hostnames.add(facts['common']['ip'])
if 'master' in facts:
+ # FIXME: not sure why but facts['dns']['domain'] fails
+ cluster_domain = 'cluster.local'
if 'cluster_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
+ svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+ 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+ 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+ all_hostnames.update(svc_names)
+ internal_hostnames.update(svc_names)
+ first_svc_ip = first_ip(facts['master']['portal_net'])
+ all_hostnames.add(first_svc_ip)
+ internal_hostnames.add(first_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
+ facts['common']['internal_hostnames'] = list(internal_hostnames)
return facts
+
+def set_etcd_facts_if_unset(facts):
+ """
+ If using embedded etcd, loads the data directory from master-config.yaml.
+
+ If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
+
+ If anything goes wrong parsing these, the fact will not be set.
+ """
+ if 'etcd' in facts:
+ if 'master' in facts and facts['master']['embedded_etcd']:
+ try:
+ # Parse master config to find actual etcd data dir:
+ master_cfg_path = os.path.join(facts['common']['config_base'],
+ 'master/master-config.yaml')
+ master_cfg_f = open(master_cfg_path, 'r')
+ config = yaml.safe_load(master_cfg_f.read())
+ master_cfg_f.close()
+
+ facts['etcd']['etcd_data_dir'] = \
+ config['etcdConfig']['storageDirectory']
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
+ else:
+ # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
+ try:
+ # Add a fake section for parsing:
+ ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
+ ini_fp = StringIO.StringIO(ini_str)
+ config = ConfigParser.RawConfigParser()
+ config.readfp(ini_fp)
+ etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
+ if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
+ etcd_data_dir = etcd_data_dir[1:-1]
+ facts['etcd']['etcd_data_dir'] = etcd_data_dir
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
+ return facts
+
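# Minimal sketch of the fake-section trick used above: /etc/etcd/etcd.conf is a
# shell-style KEY=value file, so prepending a dummy "[root]" header lets
# ConfigParser read it.  The file contents here are an assumed example.
import ConfigParser
import StringIO

example_conf = 'ETCD_NAME=default\nETCD_DATA_DIR="/var/lib/etcd/default.etcd"\n'
parser = ConfigParser.RawConfigParser()
parser.readfp(StringIO.StringIO('[root]\n' + example_conf))
print(parser.get('root', 'ETCD_DATA_DIR').strip('"'))  # /var/lib/etcd/default.etcd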
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
- node.registry_url
+ node.registry_url, node.storage_plugin_deps
Args:
facts (dict): existing facts
@@ -480,8 +605,9 @@ def set_deployment_facts_if_unset(facts):
dict: the facts dict updated with the generated deployment_type
facts
"""
- # Perhaps re-factor this as a map?
- # pylint: disable=too-many-branches
+ # disabled to avoid breaking up facts related to deployment type into
+ # multiple methods for now.
+ # pylint: disable=too-many-statements, too-many-branches
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
if 'service_type' not in facts['common']:
@@ -495,13 +621,18 @@ def set_deployment_facts_if_unset(facts):
config_base = '/etc/origin'
if deployment_type in ['enterprise', 'online']:
config_base = '/etc/openshift'
+ # Handle upgrade scenarios when symlinks don't yet exist:
+ if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
+ config_base = '/etc/openshift'
facts['common']['config_base'] = config_base
if 'data_dir' not in facts['common']:
data_dir = '/var/lib/origin'
if deployment_type in ['enterprise', 'online']:
data_dir = '/var/lib/openshift'
+ # Handle upgrade scenarios when symlinks don't yet exist:
+ if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
+ data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
- facts['common']['version'] = get_openshift_version()
for role in ('master', 'node'):
if role in facts:
@@ -514,14 +645,55 @@ def set_deployment_facts_if_unset(facts):
registry_url = 'aep3/aep-${component}:${version}'
facts[role]['registry_url'] = registry_url
+ if 'master' in facts:
+ deployment_type = facts['common']['deployment_type']
+ openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
+ if 'disabled_features' in facts['master']:
+ if deployment_type == 'atomic-enterprise':
+ curr_disabled_features = set(facts['master']['disabled_features'])
+ facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
+ else:
+ if deployment_type == 'atomic-enterprise':
+ facts['master']['disabled_features'] = openshift_features
+
+ if 'node' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'storage_plugin_deps' not in facts['node']:
+ if deployment_type in ['openshift-enterprise', 'atomic-enterprise']:
+ facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs']
+ else:
+ facts['node']['storage_plugin_deps'] = []
+
return facts
+def set_version_facts_if_unset(facts):
+ """ Set version facts. This currently includes common.version and
+ common.version_greater_than_3_1_or_1_1.
-def set_sdn_facts_if_unset(facts):
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with version facts.
+ """
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ facts['common']['version'] = version = get_openshift_version()
+ if version is not None:
+ if deployment_type == 'origin':
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
+ else:
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
+ else:
+ version_gt_3_1_or_1_1 = True
+ facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
+ return facts
+
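# Quick sketch of the LooseVersion cutoffs above, using example version strings:
from distutils.version import LooseVersion

print(LooseVersion('1.0.6') > LooseVersion('1.0.6'))         # False: origin 1.0.6 does not pass
print(LooseVersion('1.1.0') > LooseVersion('1.0.6'))         # True: origin 1.1 builds pass
print(LooseVersion('3.0.2.900') > LooseVersion('3.0.2.900')) # False
print(LooseVersion('3.1.0.0') > LooseVersion('3.0.2.900'))   # True: enterprise 3.1 builds pass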
+def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
+ system_facts (dict): ansible_facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
@@ -540,9 +712,18 @@ def set_sdn_facts_if_unset(facts):
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
- if 'node' in facts:
- if 'sdn_mtu' not in facts['node']:
- facts['node']['sdn_mtu'] = '1450'
+ if 'node' in facts and 'sdn_mtu' not in facts['node']:
+ node_ip = facts['common']['ip']
+
+ # default MTU if interface MTU cannot be detected
+ facts['node']['sdn_mtu'] = '1450'
+
+ for val in system_facts.itervalues():
+ if isinstance(val, dict) and 'mtu' in val:
+ mtu = val['mtu']
+
+ if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
+ facts['node']['sdn_mtu'] = str(mtu - 50)
return facts
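# Sketch of the MTU derivation above with assumed ansible_facts-style input:
# the interface whose IPv4 address matches openshift.common.ip drives the SDN
# MTU, leaving 50 bytes of headroom for the overlay encapsulation.
example_system_facts = {
    'ansible_eth0': {'mtu': 1500, 'ipv4': {'address': '192.168.1.10'}},
    'ansible_lo': {'mtu': 65536, 'ipv4': {'address': '127.0.0.1'}},
}
node_ip = '192.168.1.10'
sdn_mtu = '1450'  # fallback when no interface matches
for val in example_system_facts.values():
    if isinstance(val, dict) and 'mtu' in val:
        if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
            sdn_mtu = str(val['mtu'] - 50)
print(sdn_mtu)  # 1450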
@@ -632,7 +813,7 @@ def get_openshift_version():
Returns:
version: the current openshift version
"""
- version = ''
+ version = None
if os.path.isfile('/usr/bin/openshift'):
_, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
@@ -775,7 +956,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
def __init__(self, role, filename, local_facts):
self.changed = False
@@ -807,14 +988,18 @@ class OpenShiftFacts(object):
facts = merge_facts(facts, local_facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
+ facts = set_project_cfg_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
+ facts = set_flannel_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
+ facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
+ facts = set_etcd_facts_if_unset(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
@@ -853,11 +1038,12 @@ class OpenShiftFacts(object):
session_name='ssn', session_secrets_file='',
access_token_max_seconds=86400,
auth_token_max_seconds=500,
- oauth_grant_method='auto', cluster_defer_ha=False)
+ oauth_grant_method='auto')
defaults['master'] = master
if 'node' in roles:
- node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16')
+ node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
+ iptables_sync_period='5s')
defaults['node'] = node
return defaults
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 6301d4fc0..913f0dc78 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -6,5 +6,10 @@
- ansible_version | version_compare('1.9.0', 'ne')
- ansible_version | version_compare('1.9.0.1', 'ne')
+- name: Ensure PyYaml is installed
+ yum: pkg={{ item }} state=installed
+ with_items:
+ - PyYAML
+
- name: Gather Cluster facts
openshift_facts:
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 2981979e0..4b9500cbd 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,14 @@
---
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
- when: not openshift_master_ha | bool
+ when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ ignore_errors: yes
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index a5c1a805c..185bfb8f3 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -9,16 +9,22 @@
when: openshift_master_oauth_grant_method is defined
- fail:
+ msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
+- fail:
+ msg: "'native' high availability is not supported for the requested OpenShift version"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool
+- fail:
msg: "openshift_master_cluster_password must be set for multi-master installations"
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
- name: Set master facts
openshift_facts:
role: master
local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
@@ -41,6 +47,8 @@
portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
@@ -52,11 +60,19 @@
default_subdomain: "{{ osm_default_subdomain | default(None) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
default_node_selector: "{{ osm_default_node_selector | default(None) }}"
+ project_request_message: "{{ osm_project_request_message | default(None) }}"
+ project_request_template: "{{ osm_project_request_template | default(None) }}"
+ mcs_allocator_range: "{{ osm_mcs_allocator_range | default(None) }}"
+ mcs_labels_per_project: "{{ osm_mcs_labels_per_project | default(None) }}"
+ uid_allocator_range: "{{ osm_uid_allocator_range | default(None) }}"
router_selector: "{{ openshift_router_selector | default(None) }}"
registry_selector: "{{ openshift_registry_selector | default(None) }}"
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
infra_nodes: "{{ num_infra | default(None) }}"
+ disabled_features: "{{ osm_disabled_features | default(None) }}"
+ master_count: "{{ openshift_master_count | default(None) }}"
+ controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
@@ -71,7 +87,7 @@
domain: cluster.local
when: openshift.master.embedded_dns
-- name: Create config parent directory if it doesn't exist
+- name: Create config parent directory if it does not exist
file:
path: "{{ openshift_master_config_dir }}"
state: directory
@@ -84,6 +100,8 @@
creates: "{{ openshift_master_policy }}"
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Create the scheduler config
template:
@@ -92,6 +110,8 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
@@ -114,6 +134,44 @@
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: openshift.master.identity_providers
+# workaround for missing systemd unit files for controllers/api
+- name: Create the api service file
+ template:
+ src: atomic-openshift-master-api.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the controllers service file
+ template:
+ src: atomic-openshift-master-controllers.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the api env file
+ template:
+ src: atomic-openshift-master-api.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the controllers env file
+ template:
+ src: atomic-openshift-master-controllers.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- command: systemctl daemon-reload
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+# end workaround for missing systemd unit files
+
+- name: Create session secrets file
+ template:
+ dest: "{{ openshift.master.session_secrets_file }}"
+ src: sessionSecretsFile.yaml.v1.j2
+ force: no
+ notify:
+ - restart master
+ - restart master api
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
template:
@@ -122,12 +180,15 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Configure master settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
+ create: yes
with_items:
- regex: '^OPTIONS='
line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
@@ -136,23 +197,72 @@
notify:
- restart master
+- name: Configure master api settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+ notify:
+ - restart master api
+
+- name: Configure master controller settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+ notify:
+ - restart master controllers
+
- name: Start and enable master
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
register: start_result
-- name: pause to prevent service restart from interfering with bootstrapping
- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ master_service_status_changed: "{{ start_result | changed }}"
+ when: not openshift_master_ha | bool
+
+- name: Start and enable master api
+ service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+
+- set_fact:
+ master_api_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+
+# TODO: fix the ugly workaround of setting ignore_errors
+# the controllers service tries to start even if it is already started
+- name: Start and enable master controller
+ service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+ ignore_errors: yes
+
+- set_fact:
+ master_controllers_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
service: name=pcsd enabled=yes state=started
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
- name: Set the cluster user password
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
new file mode 100644
index 000000000..ba19fb348
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
@@ -0,0 +1,21 @@
+[Unit]
+Description=Atomic OpenShift Master API
+Documentation=https://github.com/openshift/origin
+After=network.target
+After=etcd.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier=atomic-openshift-master-api
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
new file mode 100644
index 000000000..8952c86ef
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Master Controllers
+Documentation=https://github.com/openshift/origin
+After=network.target
+After={{ openshift.common.service_type }}-master-api.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 6e45eaad7..bb12a0a0f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,5 +1,7 @@
apiLevels:
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
- v1beta3
+{% endif %}
- v1
apiVersion: v1
assetConfig:
@@ -8,21 +10,33 @@ assetConfig:
publicURL: {{ openshift.master.public_console_url }}/
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ""
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if openshift_master_ha | bool %}
+controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
+{% endif %}
+controllers: '*'
corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- {{ origin }}
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+ - {{ name }}
+{% endfor %}
+{% if 'disabled_features' in openshift.master %}
+disabledFeatures: {{ openshift.master.disabled_features | to_json }}
+{% endif %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+ bindNetwork: tcp4
{% endif %}
etcdClientInfo:
ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
@@ -65,15 +79,19 @@ kubeletClientInfo:
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
apiLevels:
- v1beta3
- v1
+{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
-{# TODO: support overriding masterCount #}
- masterCount: 1
- masterIP: ""
+ masterCount: {{ openshift.master.master_count }}
+ masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
+ proxyClientInfo:
+ certFile: master.proxy-client.crt
+ keyFile: master.proxy-client.key
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
servicesSubnet: {{ openshift.master.portal_net }}
@@ -93,22 +111,23 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
{% include 'v1_partials/oauthConfig.j2' %}
+pauseControllers: false
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
openshiftInfrastructureNamespace: openshift-infra
openshiftSharedResourcesNamespace: openshift
-{# TODO: Allow users to override projectConfig items #}
projectConfig:
- defaultNodeSelector: "{{ openshift.master.default_node_selector | default("") }}"
- projectRequestMessage: ""
- projectRequestTemplate: ""
+ defaultNodeSelector: "{{ openshift.master.default_node_selector }}"
+ projectRequestMessage: "{{ openshift.master.project_request_message }}"
+ projectRequestTemplate: "{{ openshift.master.project_request_template }}"
securityAllocator:
- mcsAllocatorRange: s0:/2
- mcsLabelsPerProject: 5
- uidAllocatorRange: 1000000000-1999999999/10000
+ mcsAllocatorRange: "{{ openshift.master.mcs_allocator_range }}"
+ mcsLabelsPerProject: {{ openshift.master.mcs_labels_per_project }}
+ uidAllocatorRange: "{{ openshift.master.uid_allocator_range }}"
routingConfig:
subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
+ limitSecretReferences: false
managedNames:
- default
- builder
@@ -119,8 +138,20 @@ serviceAccountConfig:
- serviceaccounts.public.key
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ca.crt
keyFile: master.server.key
maxRequestsInFlight: 500
requestTimeoutSeconds: 3600
+{% if named_certificates %}
+ namedCertificates:
+{% for named_certificate in named_certificates %}
+ - certFile: {{ named_certificate['certfile'] }}
+ keyFile: {{ named_certificate['keyfile'] }}
+ names:
+{% for name in named_certificate['names'] %}
+ - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
new file mode 100644
index 000000000..d12d9db90
--- /dev/null
+++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: SessionSecrets
+secrets:
+{% for secret in openshift_master_session_auth_secrets %}
+- authentication: "{{ openshift_master_session_auth_secrets[loop.index0] }}"
+ encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}"
+{% endfor %}
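The sessionSecretsFile template above pairs authentication and encryption
secrets positionally, so openshift_master_session_auth_secrets and
openshift_master_session_encryption_secrets are expected to be lists of equal
length. A minimal sketch of the pairing, with assumed example secret values:

auth_secrets = ['auth-secret-1', 'auth-secret-2']
encryption_secrets = ['enc-secret-1', 'enc-secret-2']
for auth, enc in zip(auth_secrets, encryption_secrets):
    print('- authentication: "%s"' % auth)
    print('  encryption: "%s"' % enc)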
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index ecdb4f883..534465451 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -2,6 +2,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 5c9639ea5..314f068e7 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -14,9 +14,8 @@
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ master_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_master_config_dir }} --overwrite=false
- args:
- creates: "{{ openshift_master_config_dir }}/master.server.key"
+ when: master_certs_missing
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 0d75a9eb3..13e5d7a4b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -6,13 +6,9 @@
mode: 0700
with_items: masters_needing_certs
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: hard
- with_nested:
- - masters_needing_certs
- - - ca.crt
+- set_fact:
+ master_certificates:
+ - ca.crt
- ca.key
- ca.serial.txt
- admin.crt
@@ -31,7 +27,17 @@
- openshift-router.kubeconfig
- serviceaccounts.private.key
- serviceaccounts.public.key
+ master_31_certificates:
+ - master.proxy-client.crt
+ - master.proxy-client.key
+- file:
+ src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
+ dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: hard
+ with_nested:
+ - masters_needing_certs
+ - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}"
- name: Create the master certificates if they do not already exist
command: >
@@ -41,6 +47,5 @@
--public-master={{ item.openshift.master.public_api_url }}
--cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
--overwrite=false
- args:
- creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt"
+ when: master_certs_missing
with_items: masters_needing_certs
diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml
deleted file mode 100644
index 3b416005b..000000000
--- a/roles/openshift_master_cluster/tasks/configure_deferred.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- debug: msg="Deferring config"
-
-- name: Start and enable the master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 315947183..6303a6e46 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -4,10 +4,7 @@
register: pcs_status
changed_when: false
failed_when: false
- when: not openshift.master.cluster_defer_ha | bool
+ when: openshift.master.cluster_method == "pacemaker"
- include: configure.yml
when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
-
-- include: configure_deferred.yml
- when: openshift.master.cluster_defer_ha | bool
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 633f3ed13..447ca85f3 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,7 @@
---
- name: restart node
service: name={{ openshift.common.service_type }}-node state=restarted
+ when: not node_service_status_changed | default(false)
- name: restart docker
service: name=docker state=restarted
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c92008a77..9d40ae3b3 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- { role: openshift_common }
+- { role: docker }
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d45dd8073..d11bc5123 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -8,7 +8,7 @@
when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
- when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
+ when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- name: Set node facts
openshift_facts:
@@ -22,14 +22,18 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
annotations: "{{ openshift_node_annotations | default(none) }}"
- registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}"
+ docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}"
+ iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ registry_url: "{{ oreg_url | default(none) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+ storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly.
@@ -68,12 +72,14 @@
register: docker_check
# TODO: Enable secure registry when code available in origin
-- name: Secure Registry
+- name: Secure Registry and Logs Options
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \
-{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'"
+{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \
+{% if openshift.node.docker_log_driver is defined %} --log-driver {{ openshift.node.docker_log_driver }} {% endif %} \
+{% if openshift.node.docker_log_options is defined %} {{ openshift.node.docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}} {% endif %} '"
when: docker_check.stat.isreg
notify:
- restart docker
@@ -120,14 +126,12 @@
notify:
- restart docker
-- name: Allow NFS access for VMs
- seboolean: name=virt_use_nfs state=yes persistent=yes
- when: ansible_selinux and ansible_selinux.status == "enabled"
+- name: Additional storage plugin configuration
+ include: storage_plugins/main.yml
- name: Start and enable node
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: start_result
-- name: pause to prevent service restart from interfering with bootstrapping
- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ node_service_status_changed: "{{ start_result | changed }}"
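The Secure Registry and Logs Options task above splits docker_log_options and
prefixes each entry with --log-opt before appending it to OPTIONS in
/etc/sysconfig/docker. A rough sketch of that string assembly, assuming the
repo's oo_split and oo_prepend_strings_in_list filters behave roughly like the
comma split and prefixing below (all values are example data):

log_driver = 'json-file'                  # example docker_log_driver value
log_options = 'max-size=50m,max-file=5'   # example docker_log_options value
log_opt_flags = ['--log-opt ' + opt for opt in log_options.split(',')]
options_line = ("OPTIONS='--insecure-registry=172.30.0.0/16 --selinux-enabled "
                "--log-driver " + log_driver + ' ' + ' '.join(log_opt_flags) + "'")
print(options_line)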
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
new file mode 100644
index 000000000..b6936618a
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -0,0 +1,5 @@
+---
+- name: Install Ceph storage plugin dependencies
+ yum:
+ pkg: ceph-common
+ state: installed
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
new file mode 100644
index 000000000..b812e81df
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -0,0 +1,12 @@
+---
+- name: Install GlusterFS storage plugin dependencies
+ yum:
+ pkg: glusterfs-fuse
+ state: installed
+
+- name: Set seboolean to allow gluster storage plugin access from containers
+ seboolean:
+ name: virt_use_fusefs
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
diff --git a/roles/openshift_node/tasks/storage_plugins/main.yml b/roles/openshift_node/tasks/storage_plugins/main.yml
new file mode 100644
index 000000000..39c7b9390
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/main.yml
@@ -0,0 +1,13 @@
+---
+# The NFS storage plugin is always enabled since it doesn't require any
+# additional package dependencies
+- name: NFS storage plugin configuration
+ include: nfs.yml
+
+- name: GlusterFS storage plugin configuration
+ include: glusterfs.yml
+ when: "'glusterfs' in openshift.node.storage_plugin_deps"
+
+- name: Ceph storage plugin configuration
+ include: ceph.yml
+ when: "'ceph' in openshift.node.storage_plugin_deps"
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
new file mode 100644
index 000000000..1edf21d9b
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -0,0 +1,7 @@
+---
+- name: Set seboolean to allow nfs storage plugin access from containers
+ seboolean:
+ name: virt_use_nfs
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 4931d127e..7d2f506e3 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -4,6 +4,7 @@ dnsDomain: {{ osn_cluster_dns_domain }}
dnsIP: {{ osn_cluster_dns_ip }}
dockerConfig:
execHandlerName: ""
+iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
@@ -22,6 +23,7 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+nodeIP: {{ openshift.common.ip }}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 12e98b7a1..aa696ae12 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -8,7 +8,7 @@
# proper repos correctly.
- assert:
- that: openshift_deployment_type in known_openshift_deployment_types
+ that: openshift.common.deployment_type in known_openshift_deployment_types
- name: Ensure libselinux-python is installed
yum:
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index a503b24d7..59c89bb02 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -15,6 +15,8 @@
- include_vars: template_ops_tools.yml
- include_vars: template_app_zabbix_server.yml
- include_vars: template_app_zabbix_agent.yml
+- include_vars: template_performance_copilot.yml
+- include_vars: template_aws.yml
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
@@ -79,3 +81,19 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+
+- name: Include Template Performance Copilot
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_performance_copilot }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template AWS
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_aws }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
diff --git a/roles/os_zabbix/vars/template_app_zabbix_agent.yml b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
index 6349b6384..d636d4822 100644
--- a/roles/os_zabbix/vars/template_app_zabbix_agent.yml
+++ b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
@@ -6,14 +6,14 @@ g_template_app_zabbix_agent:
applications:
- Zabbix agent
value_type: character
- zabbix_type: 0
+ zabbix_type: agent
- key: agent.ping
applications:
- Zabbix agent
description: The agent always returns 1 for this item. It could be used in combination with nodata() for availability check.
value_type: int
- zabbix_type: 0
+ zabbix_type: agent
ztriggers:
- name: '[Reboot] Zabbix agent on {HOST.NAME} is unreachable for 15 minutes'
diff --git a/roles/os_zabbix/vars/template_app_zabbix_server.yml b/roles/os_zabbix/vars/template_app_zabbix_server.yml
index aeec16254..43517113b 100644
--- a/roles/os_zabbix/vars/template_app_zabbix_server.yml
+++ b/roles/os_zabbix/vars/template_app_zabbix_server.yml
@@ -8,7 +8,7 @@ g_template_app_zabbix_server:
description: A simple count of the number of partition creates output by the housekeeper script.
units: ''
value_type: int
- zabbix_type: 5
+ zabbix_type: internal
- key: housekeeper_drops
applications:
@@ -16,7 +16,7 @@ g_template_app_zabbix_server:
description: A simple count of the number of partition drops output by the housekeeper script.
units: ''
value_type: int
- zabbix_type: 5
+ zabbix_type: internal
- key: housekeeper_errors
applications:
@@ -24,7 +24,7 @@ g_template_app_zabbix_server:
description: A simple count of the number of errors output by the housekeeper script.
units: ''
value_type: int
- zabbix_type: 5
+ zabbix_type: internal
- key: housekeeper_total
applications:
@@ -33,7 +33,7 @@ g_template_app_zabbix_server:
script.
units: ''
value_type: int
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,alerter,avg,busy]
applications:
@@ -41,7 +41,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,configuration syncer,avg,busy]
applications:
@@ -49,7 +49,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,db watchdog,avg,busy]
applications:
@@ -57,7 +57,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,discoverer,avg,busy]
applications:
@@ -65,7 +65,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,escalator,avg,busy]
applications:
@@ -73,7 +73,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,history syncer,avg,busy]
applications:
@@ -81,7 +81,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,housekeeper,avg,busy]
applications:
@@ -89,7 +89,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,http poller,avg,busy]
applications:
@@ -97,7 +97,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,icmp pinger,avg,busy]
applications:
@@ -105,7 +105,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,ipmi poller,avg,busy]
applications:
@@ -113,7 +113,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,java poller,avg,busy]
applications:
@@ -121,7 +121,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,node watcher,avg,busy]
applications:
@@ -129,7 +129,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,poller,avg,busy]
applications:
@@ -137,7 +137,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,proxy poller,avg,busy]
applications:
@@ -145,7 +145,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,self-monitoring,avg,busy]
applications:
@@ -153,7 +153,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,snmp trapper,avg,busy]
applications:
@@ -161,7 +161,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,timer,avg,busy]
applications:
@@ -169,7 +169,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,trapper,avg,busy]
applications:
@@ -177,7 +177,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[process,unreachable poller,avg,busy]
applications:
@@ -185,7 +185,7 @@ g_template_app_zabbix_server:
description: ''
units: '%'
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[queue,10m]
applications:
@@ -193,7 +193,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: int
- zabbix_type: 5
+ zabbix_type: internal
interval: 600
- key: zabbix[queue]
@@ -202,7 +202,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: int
- zabbix_type: 5
+ zabbix_type: internal
interval: 600
- key: zabbix[rcache,buffer,pfree]
@@ -211,7 +211,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[wcache,history,pfree]
applications:
@@ -219,7 +219,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[wcache,text,pfree]
applications:
@@ -227,7 +227,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[wcache,trend,pfree]
applications:
@@ -235,7 +235,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
- key: zabbix[wcache,values]
applications:
@@ -243,7 +243,7 @@ g_template_app_zabbix_server:
description: ''
units: ''
value_type: float
- zabbix_type: 5
+ zabbix_type: internal
delta: 1 # speed per second
ztriggers:
diff --git a/roles/os_zabbix/vars/template_aws.yml b/roles/os_zabbix/vars/template_aws.yml
new file mode 100644
index 000000000..0ed682128
--- /dev/null
+++ b/roles/os_zabbix/vars/template_aws.yml
@@ -0,0 +1,25 @@
+---
+g_template_aws:
+ name: Template AWS
+ zdiscoveryrules:
+ - name: disc.aws
+ key: disc.aws
+ lifetime: 1
+ description: "Dynamically register AWS bucket info"
+
+ zitemprototypes:
+ - discoveryrule_key: disc.aws
+ name: "S3 bucket size (GB) [{#S3_BUCKET}]"
+ key: "disc.aws.size[{#S3_BUCKET}]"
+ value_type: int
+ description: "Size of S3 bucket"
+ applications:
+ - AWS
+
+ - discoveryrule_key: disc.aws
+ name: "S3 bucket object count [{#S3_BUCKET}]"
+ key: "disc.aws.objects[{#S3_BUCKET}]"
+ value_type: int
+ description: "Objects in S3 bucket"
+ applications:
+ - AWS
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
index 395e054de..bfabf50c5 100644
--- a/roles/os_zabbix/vars/template_docker.yml
+++ b/roles/os_zabbix/vars/template_docker.yml
@@ -7,6 +7,11 @@ g_template_docker:
- Docker Daemon
value_type: int
+ - key: docker.info_elapsed_ms
+ applications:
+ - Docker Daemon
+ value_type: int
+
- key: docker.storage.is_loopback
applications:
- Docker Storage
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 1de4fefbb..6defc4989 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -31,6 +31,78 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.etcd.create.success
+ description: Show number of successful create actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.create.fail
+ description: Show number of failed create actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.delete.success
+ description: Show number of successful delete actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.delete.fail
+ description: Show number of failed delete actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.get.success
+ description: Show number of successful get actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.get.fail
+ description: Show number of failed get actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.set.success
+ description: Show number of successful set actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.set.fail
+ description: Show number of failed set actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.update.success
+ description: Show number of successful update actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.update.fail
+ description: Show number of failed update actions
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.watchers
+ description: Show number of etcd watchers
+ type: int
+ applications:
+ - Openshift Etcd
+
+ - key: openshift.master.etcd.ping
+ description: etcd ping
+ type: int
+ applications:
+ - Openshift Etcd
+
ztriggers:
- name: 'Application creation has failed on {HOST.NAME}'
expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
@@ -56,3 +128,13 @@ g_template_openshift_master:
expression: '{Template Openshift Master:openshift.project.counter.last()}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: info
+
+ - name: 'Low number of etcd watchers on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: avg
+
+ - name: 'Etcd ping failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: high
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 3ae1500bc..04665be62 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -194,6 +194,16 @@ g_template_os_linux:
lifetime: 1
description: "Dynamically register the filesystems"
+ - name: disc.disk
+ key: disc.disk
+ lifetime: 1
+ description: "Dynamically register disks on a node"
+
+ - name: disc.network
+ key: disc.network
+ lifetime: 1
+ description: "Dynamically register network interfaces on a node"
+
zitemprototypes:
- discoveryrule_key: disc.filesys
name: "disc.filesys.full.{#OSO_FILESYS}"
@@ -211,6 +221,42 @@ g_template_os_linux:
applications:
- Disk
+ - discoveryrule_key: disc.disk
+ name: "TPS (IOPS) for disk {#OSO_DISK}"
+ key: "disc.disk.tps[{#OSO_DISK}]"
+ value_type: int
+ description: "PCP disk.dev.totals metric measured over a period of time. This shows how many disk transactions per second the disk is using"
+ applications:
+ - Disk
+
+ - discoveryrule_key: disc.disk
+ name: "Percent Utilized for disk {#OSO_DISK}"
+ key: "disc.disk.putil[{#OSO_DISK}]"
+ value_type: float
+ description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command"
+ applications:
+ - Disk
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.in.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+ description: "PCP network.interface.in.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
+ applications:
+ - Network
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.out.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+ description: "PCP network.interface.out.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
+ applications:
+ - Network
+
ztriggerprototypes:
- name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
@@ -246,15 +292,15 @@ g_template_os_linux:
# CPU Utilization #
- name: 'CPU idle less than 5% on {HOST.NAME}'
- expression: '{Template OS Linux:kernel.all.cpu.idle.last()}<5 and {Template OS Linux:kernel.all.cpu.idle.last(#2)}<5'
+ expression: '{Template OS Linux:kernel.all.cpu.idle.max(#5)}<5'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
priority: average
description: 'CPU is less than 5% idle'
- name: 'CPU idle less than 10% on {HOST.NAME}'
- expression: '{Template OS Linux:kernel.all.cpu.idle.last()}<10 and {Template OS Linux:kernel.all.cpu.idle.last(#2)}<10'
+ expression: '{Template OS Linux:kernel.all.cpu.idle.max(#5)}<10'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
- priority: warn
+ priority: average
description: 'CPU is less than 10% idle'
dependencies:
- 'CPU idle less than 5% on {HOST.NAME}'
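The reworked CPU idle triggers above replace two consecutive last() samples with max(#5), so the alert fires only when the five most recent samples are all below the threshold, which cuts down on flapping from a single noisy sample. A small illustration of the intended behaviour (plain Python, not Zabbix syntax):
```
def cpu_idle_trigger_fires(samples, threshold):
    """Illustrates '{... kernel.all.cpu.idle.max(#5)} < threshold': fire only
    when all five of the most recent samples are below the threshold."""
    last_five = samples[-5:]
    return len(last_five) == 5 and max(last_five) < threshold

# A single recovered sample keeps the alert quiet...
assert not cpu_idle_trigger_fires([3, 4, 2, 40, 3], threshold=5)
# ...while five consecutive low-idle samples trip it.
assert cpu_idle_trigger_fires([4, 3, 2, 1, 4], threshold=5)
```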
diff --git a/roles/os_zabbix/vars/template_performance_copilot.yml b/roles/os_zabbix/vars/template_performance_copilot.yml
new file mode 100644
index 000000000..b62fa0228
--- /dev/null
+++ b/roles/os_zabbix/vars/template_performance_copilot.yml
@@ -0,0 +1,14 @@
+---
+g_template_performance_copilot:
+ name: Template Performance Copilot
+ zitems:
+ - key: pcp.ping
+ applications:
+ - Performance Copilot
+ value_type: int
+
+ ztriggers:
+ - name: 'pcp.ping failed on {HOST.NAME}'
+ expression: '{Template Performance Copilot:pcp.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_pcp_ping.asciidoc'
+ priority: average
diff --git a/test/units/README.md b/test/units/README.md
index 3bed227eb..78a02c3ea 100644
--- a/test/units/README.md
+++ b/test/units/README.md
@@ -4,4 +4,4 @@ These should be run by sourcing the env-setup:
$ source test/env-setup
Then navigate to the test/units/ directory.
-$ python -m unittest multi_ec2_test
+$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
new file mode 100755
index 000000000..168cd82b7
--- /dev/null
+++ b/test/units/multi_inventory_test.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for MultiInventory
+'''
+
+import unittest
+import multi_inventory
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name
+class MultiInventoryTest(unittest.TestCase):
+ '''
+ Test class for MultiInventory
+ '''
+
+# def setUp(self):
+# '''setup method'''
+# pass
+
+ def test_merge_simple_1(self):
+ '''Testing a simple merge of 2 dictionaries'''
+ a = {"key1" : 1}
+ b = {"key1" : 2}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2]})
+
+ def test_merge_b_empty(self):
+ '''Testing a merge where the second dictionary is empty'''
+ a = {"key1" : 1}
+ b = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_a_empty(self):
+ '''Testing a merge where the first dictionary is empty'''
+ b = {"key1" : 1}
+ a = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_hash_array(self):
+ '''Testing a merge of a dictionary and a dictionary with an array'''
+ a = {"key1" : {"hasha": 1}}
+ b = {"key1" : [1, 2]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
+
+ def test_merge_array_hash(self):
+ '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
+ a = {"key1" : [1, 2]}
+ b = {"key1" : {"hasha": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
+
+ def test_merge_keys_1(self):
+ '''Testing a merge on a dictionary for keys'''
+ a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
+ b = {"key2" : {"hashb": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
+
+ def test_merge_recursive_1(self):
+ '''Testing a recursive merge'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_array_item(self):
+ '''Testing a recursive merge for an array'''
+ a = {"a" : {"b": {"c": [1]}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_hash_item(self):
+ '''Testing a recursive merge for a hash'''
+ a = {"a" : {"b": {"c": {"d": 1}}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
+
+ def test_merge_recursive_array_hash(self):
+ '''Testing a recursive merge for an array and a hash'''
+ a = {"a" : [{"b": {"c": 1}}]}
+ b = {"a" : {"b": {"c": 1}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+ def test_merge_recursive_hash_array(self):
+ '''Testing a recursive merge for an array and a hash'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : [{"b": {"c": 1}}]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+# def tearDown(self):
+# '''TearDown method'''
+# pass
+
+if __name__ == "__main__":
+ unittest.main()
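Taken together, the tests above pin down the merge semantics the multi-inventory relies on: values for colliding keys are folded into lists, nested dictionaries are merged recursively, and values already present in a list are not re-added. A simplified sketch of that behaviour (the real implementation lives in inventory/multi_inventory.py):
```
def merge_destructively(inv_a, inv_b):
    """Merge inv_b into inv_a in place (simplified sketch of the semantics
    the tests above exercise)."""
    for key, value in inv_b.items():
        if key not in inv_a:
            inv_a[key] = value
        elif isinstance(inv_a[key], dict) and isinstance(value, dict):
            merge_destructively(inv_a[key], value)
        elif isinstance(inv_a[key], list):
            if value not in inv_a[key]:
                inv_a[key].append(value)
        elif isinstance(value, list):
            # Keep the incoming list; fold the old value in unless it is
            # already an element of that list.
            inv_a[key] = value if inv_a[key] in value else [inv_a[key]] + value
        else:
            inv_a[key] = [inv_a[key], value]
    return inv_a
```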
diff --git a/test/units/mutli_ec2_test.py b/test/units/mutli_ec2_test.py
deleted file mode 100755
index 95df93cd2..000000000
--- a/test/units/mutli_ec2_test.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python2
-
-import unittest
-import sys
-import os
-import sys
-import multi_ec2
-
-class MultiEc2Test(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_merge_simple_1(self):
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2]})
-
- def test_merge_b_empty(self):
- a = {"key1" : 1}
- b = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
- b = {"key1" : 1}
- a = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1,2]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]})
-
- def test_merge_array_hash(self):
- a = {"key1" : [1,2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- a = {"key1" : [1,2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_array_item(self):
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_hash_item(self):
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def tearDown(self):
- pass
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/utils/.gitignore b/utils/.gitignore
new file mode 100644
index 000000000..68759c0ba
--- /dev/null
+++ b/utils/.gitignore
@@ -0,0 +1,45 @@
+package/
+
+# Backup files
+*.~
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+bin/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+.tox/
+.coverage
+.cache
+.noseids
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Sphinx documentation
+docs/_build/
diff --git a/utils/README.txt b/utils/README.txt
new file mode 100644
index 000000000..6a6a1d24d
--- /dev/null
+++ b/utils/README.txt
@@ -0,0 +1,24 @@
+## Running From Source
+
+You will need to set up a virtualenv to run from source and execute the unit tests.
+
+$ virtualenv oo-install
+$ source ./oo-install/bin/activate
+$ virtualenv --relocatable ./oo-install/
+$ python setup.py install
+
+The virtualenv bin directory should now be at the start of your $PATH, and oo-install is ready to use from your shell.
+
+You can exit the virtualenv with:
+
+$ deactivate
+
+## Testing
+
+Install some testing libraries (we cannot do this via setuptools, due to the version of it that virtualenv bundles):
+
+$ pip install mock nose
+
+Then run the tests with:
+
+$ oo-install/bin/nosetests
diff --git a/utils/docs/config.md b/utils/docs/config.md
new file mode 100644
index 000000000..2729f8d37
--- /dev/null
+++ b/utils/docs/config.md
@@ -0,0 +1,80 @@
+# oo-install Supported Configuration File
+
+Upon completion oo-install will write out a configuration file representing the settings that were gathered and used. This configuration file, or one crafted by hand, can be used to run or re-run the installer and add additional hosts, upgrade, or re-install.
+
+The default location this config file will be written to is ~/.config/openshift/installer.cfg.yml.
+
+## Example
+
+```
+version: v1
+variant: openshift-enterprise
+variant_version: 3.0
+ansible_ssh_user: root
+hosts:
+- ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ containerized: true
+ connect_to: 24.222.0.1
+- ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ connect_to: 10.0.0.2
+- ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+ connect_to: 10.0.0.3
+```
+
+## Primary Settings
+
+### version
+
+Indicates the version of the configuration format this file was written with. The current version is v1.
+
+### variant
+
+The OpenShift variant to install. Currently valid options are:
+
+ * openshift-enterprise
+ * atomic-enterprise
+
+### variant_version (optional)
+
+Default: Latest version for your chosen variant.
+
+A version which must be valid for your selected variant. If not specified, the latest will be assumed.
+
+Examples: 3.0, 3.1, etc.
+
+### hosts
+
+This section defines a list of the hosts you wish to install the OpenShift master/node service on.
+
+*ip* or *hostname* must be specified so the installer can connect to the system to gather facts before proceeding with the install.
+
+If *public_ip* or *public_hostname* are not specified, this information will be gathered from the facts and the user will be asked to confirm it in an editor. For an unattended install the installer will error out instead (you must provide complete host records for an unattended install).
+
+*master* and *node* determine the type of services that will be installed. One of these must be set to true for the configuration file to be considered valid.
+
+*containerized* indicates you want to run OpenShift services in a container on this host.
+
+### ansible_ssh_user
+
+Default: root
+
+Defines the user ansible will use to ssh to remote systems for gathering facts and the installation.
+
+### ansible_log_path
+
+Default: /tmp/ansible.log
+
+The location where the installer writes the ansible log for the run.
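Since the file is plain YAML, it is easy to inspect or post-process outside the installer. A small illustrative sketch (hypothetical; the installer's own parsing lives in ooinstall/oo_config.py added later in this change):
```
import os
import yaml

# Illustrative only: load an installer.cfg.yml and summarize the host roles.
path = os.path.expanduser('~/.config/openshift/installer.cfg.yml')
with open(path) as cfgfile:
    cfg = yaml.safe_load(cfgfile)

print('variant: %s %s' % (cfg['variant'], cfg.get('variant_version', 'latest')))
for host in cfg.get('hosts', []):
    role = 'master+node' if host.get('master') else 'node'
    print('%s -> %s' % (host['connect_to'], role))
```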
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
new file mode 100644
index 000000000..b7376ddfc
--- /dev/null
+++ b/utils/etc/ansible.cfg
@@ -0,0 +1,25 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+forks = 10
+host_key_checking = False
+nocows = 1
+# Need to handle:
+# inventory - derive from OO_ANSIBLE_DIRECTORY env var
+# callback_plugins - derive from pkg_resource.resource_filename
+# private_key_file - prompt if missing
+# remote_tmp - set if provided by user (cli)
+# ssh_args - set if provided by user (cli)
+# control_path \ No newline at end of file
diff --git a/utils/setup.cfg b/utils/setup.cfg
new file mode 100644
index 000000000..79bc67848
--- /dev/null
+++ b/utils/setup.cfg
@@ -0,0 +1,5 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=1
diff --git a/utils/setup.py b/utils/setup.py
new file mode 100644
index 000000000..eac1b4b2e
--- /dev/null
+++ b/utils/setup.py
@@ -0,0 +1,85 @@
+"""A setuptools based setup module.
+
+"""
+
+# Always prefer setuptools over distutils
+from setuptools import setup
+
+setup(
+ name='ooinstall',
+
+ # Versions should comply with PEP440. For a discussion on single-sourcing
+ # the version across setup.py and the project code, see
+ # https://packaging.python.org/en/latest/single_source_version.html
+ version="3.0.0",
+
+ description="Ansible wrapper for OpenShift Enterprise 3 installation.",
+
+ # The project's main homepage.
+ url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
+
+ # Author details
+ author="openshift@redhat.com",
+ author_email="OpenShift",
+
+ # Choose your license
+ license="Apache 2.0",
+
+ # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Programming Language :: Python :: 2.7',
+ 'Topic :: Utilities',
+ ],
+
+ # What does your project relate to?
+ keywords='oo-install setuptools development',
+
+ # You can just specify the packages manually here if your project is
+ # simple. Or you can use find_packages().
+ #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
+ packages=['ooinstall'],
+ package_dir={'ooinstall': 'src/ooinstall'},
+
+
+ # List run-time dependencies here. These will be installed by pip when
+ # your project is installed. For an analysis of "install_requires" vs pip's
+ # requirements files see:
+ # https://packaging.python.org/en/latest/requirements.html
+ install_requires=['click', 'PyYAML'],
+
+ # List additional groups of dependencies here (e.g. development
+ # dependencies). You can install these using the following syntax,
+ # for example:
+ # $ pip install -e .[dev,test]
+ #extras_require={
+ # 'dev': ['check-manifest'],
+ # 'test': ['coverage'],
+ #},
+
+ # If there are data files included in your packages that need to be
+ # installed, specify them here. If using Python 2.6 or less, then these
+ # have to be included in MANIFEST.in as well.
+ package_data={
+ 'ooinstall': ['ansible.cfg', 'ansible_plugins/*'],
+ },
+
+ # Although 'package_data' is the preferred approach, in some cases you may
+ # need to place data files outside of your packages. See:
+ # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
+ # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
+ #data_files=[('my_data', ['data/data_file'])],
+ tests_require=['nose'],
+
+ test_suite='nose.collector',
+
+ # To provide executable scripts, use entry points in preference to the
+ # "scripts" keyword. Entry points provide cross-platform support and allow
+ # pip to create the appropriate form of executable for the target platform.
+ entry_points={
+ 'console_scripts': [
+ 'oo-install=ooinstall.cli_installer:cli',
+ ],
+ },
+)
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
new file mode 100755
index 000000000..e1b2cec90
--- /dev/null
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -0,0 +1,86 @@
+#!/bin/sh
+
+# Grab command-line arguments
+cmdlnargs="$@"
+
+: ${OO_INSTALL_KEEP_ASSETS:="false"}
+: ${OO_INSTALL_CONTEXT:="INSTALLCONTEXT"}
+: ${TMPDIR:=/tmp}
+: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
+[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ clear
+ echo "Checking for necessary tools..."
+fi
+if [ -e /etc/redhat-release ]
+then
+ for i in python python-virtualenv openssh-clients gcc
+ do
+ rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"yum install ${i}\"."; exit 1; }
+ done
+fi
+for i in python virtualenv ssh gcc
+do
+ command -v $i >/dev/null 2>&1 || { echo >&2 "OpenShift installation requires $i on the PATH but it does not appear to be available. Correct this and rerun the installer."; exit 1; }
+done
+
+# All instances of INSTALLPKGNAME are replaced during packaging with the actual package name.
+if [[ -e ./INSTALLPKGNAME.tgz ]]
+then
+ if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+ then
+ echo "Using bundled assets."
+ fi
+ cp INSTALLPKGNAME.tgz ${TMPDIR}/INSTALLPKGNAME.tgz
+elif [[ $OO_INSTALL_KEEP_ASSETS == 'true' && -e ${TMPDIR}/INSTALLPKGNAME.tgz ]]
+then
+ if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+ then
+ echo "Using existing installer assets."
+ fi
+else
+ echo "Downloading oo-install package to ${TMPDIR}INSTALLPKGNAME.tgz..."
+ curl -s -o ${TMPDIR}INSTALLPKGNAME.tgz https://install.openshift.com/INSTALLVERPATHINSTALLPKGNAME.tgz
+fi
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ echo "Extracting oo-install to ${TMPDIR}INSTALLPKGNAME..."
+fi
+tar xzf ${TMPDIR}INSTALLPKGNAME.tgz -C ${TMPDIR} 2>&1 >> $OO_INSTALL_LOG
+
+echo "Preparing to install. This can take a minute or two..."
+virtualenv ${TMPDIR}/INSTALLPKGNAME 2>&1 >> $OO_INSTALL_LOG
+cd ${TMPDIR}/INSTALLPKGNAME 2>&1 >> $OO_INSTALL_LOG
+source ./bin/activate 2>&1 >> $OO_INSTALL_LOG
+pip install --no-index -f file:///$(readlink -f deps) ansible 2>&1 >> $OO_INSTALL_LOG
+
+# TODO: these deps should technically be handled as part of installing ooinstall
+pip install --no-index -f file:///$(readlink -f deps) click 2>&1 >> $OO_INSTALL_LOG
+pip install --no-index ./src/ 2>&1 >> $OO_INSTALL_LOG
+echo "Installation preperation done!" 2>&1 >> $OO_INSTALL_LOG
+
+echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ echo "Starting oo-install..." 2>&1 >> $OO_INSTALL_LOG
+else
+ clear
+fi
+oo-install $cmdlnargs --ansible-playbook-directory ${TMPDIR}/INSTALLPKGNAME/openshift-ansible-*/ --ansible-log-path $OO_INSTALL_LOG
+
+if [ $OO_INSTALL_KEEP_ASSETS == 'true' ]
+then
+ echo "Keeping temporary assets in ${TMPDIR}"
+else
+ echo "Removing temporary assets."
+ rm -rf ${TMPDIR}INSTALLPKGNAME
+ rm -rf ${TMPDIR}INSTALLPKGNAME.tgz
+fi
+
+echo "Please see $OO_INSTALL_LOG for full output."
+
+exit
diff --git a/utils/site_assets/oo_install_launcher.README.txt b/utils/site_assets/oo_install_launcher.README.txt
new file mode 100644
index 000000000..46947b481
--- /dev/null
+++ b/utils/site_assets/oo_install_launcher.README.txt
@@ -0,0 +1,22 @@
+= oo-install Portable Installer Package
+
+This package is identical to the installer package that can be downloaded
+and executed directly from https://install.openshift.com/.
+
+NOTE: It will still be necessary for this installer to download RPMs from the
+internet, unless you have already set up the necessary local repositories.
+
+To run the installer from this package, run the following command:
+
+$ ./LAUNCHERNAME
+
+That command script and the packaged zip file can be burned to a CD or
+written to a USB drive and used to run the oo-install utility in places
+where the web-based installer is not reachable.
+
+All of the command-line arguments supported by oo-install can be passed
+to this launcher application.
+
+For more information for Enterprise installs, refer to the OpenShift
+Enterprise Administrator Guide:
+https://docs.openshift.com/enterprise/latest/welcome/index.html
diff --git a/utils/src/DESCRIPTION.rst b/utils/src/DESCRIPTION.rst
new file mode 100644
index 000000000..68b3a57f2
--- /dev/null
+++ b/utils/src/DESCRIPTION.rst
@@ -0,0 +1,13 @@
+A sample Python project
+=======================
+
+This is the description file for the project.
+
+The file should use UTF-8 encoding and be written using ReStructured Text. It
+will be used to generate the project webpage on PyPI, and should be written for
+that purpose.
+
+Typical contents for this file would include an overview of the project, basic
+usage examples, etc. Generally, including the project changelog in here is not
+a good idea, although a simple "What's New" section for the most recent version
+may be appropriate.
diff --git a/utils/src/MANIFEST.in b/utils/src/MANIFEST.in
new file mode 100644
index 000000000..d4153e738
--- /dev/null
+++ b/utils/src/MANIFEST.in
@@ -0,0 +1,9 @@
+include DESCRIPTION.rst
+
+# Include the test suite (FIXME: does not work yet)
+# recursive-include tests *
+
+# If using Python 2.6 or less, then have to include package data, even though
+# it's already declared in setup.py
+include ooinstall/*
+include ansible.cfg
diff --git a/utils/src/data/data_file b/utils/src/data/data_file
new file mode 100644
index 000000000..7c0646bfd
--- /dev/null
+++ b/utils/src/data/data_file
@@ -0,0 +1 @@
+some data \ No newline at end of file
diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py
new file mode 100644
index 000000000..944dea3b5
--- /dev/null
+++ b/utils/src/ooinstall/__init__.py
@@ -0,0 +1,5 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=missing-docstring
+
+from .oo_config import OOConfig
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
new file mode 100644
index 000000000..ea6ed6574
--- /dev/null
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -0,0 +1,88 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import os
+import yaml
+
+class CallbackModule(object):
+
+ def __init__(self):
+ ######################
+ # This is an ugly hack and should be revisited: it should probably only
+ # be used for the openshift_facts.yml playbook, so maybe there's some way
+ # to check a variable that's set when that playbook is run?
+ try:
+ self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
+ except KeyError:
+ raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
+ 'variable must be set.')
+ self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
+ os.O_WRONLY)
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ pass
+
+ def runner_on_ok(self, host, res):
+ if res['invocation']['module_args'] == 'var=result':
+ facts = res['var']['result']['ansible_facts']['openshift']
+ hosts_yaml = {}
+ hosts_yaml[host] = facts
+ os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml))
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res):
+ pass
+
+ def runner_on_async_ok(self, host, res):
+ pass
+
+ def runner_on_async_failed(self, host, res):
+ pass
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ #pylint: disable=too-many-arguments
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
+ encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ pass
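This callback only does anything when OO_INSTALL_CALLBACK_FACTS_YAML is set; openshift_ansible.default_facts() (added below) exports that variable together with ANSIBLE_CALLBACK_PLUGINS before invoking ansible-playbook. A hedged sketch of reading the YAML it produces, using the same per-host layout that confirm_hosts_facts() in cli_installer.py expects (the file path here is illustrative):
```
import yaml

# Sketch only: check the callback output for hosts missing any of the facts
# the interactive confirmation step relies on.
REQUIRED = ('ip', 'public_ip', 'hostname', 'public_hostname')

with open('/tmp/callback_facts.yaml') as facts_file:
    callback_facts = yaml.safe_load(facts_file)

for host, facts in callback_facts.items():
    common = facts.get('common', {})
    missing = [f for f in REQUIRED if not common.get(f)]
    if missing:
        print('%s is missing: %s' % (host, ', '.join(missing)))
```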
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
new file mode 100644
index 000000000..3c3f45c3b
--- /dev/null
+++ b/utils/src/ooinstall/cli_installer.py
@@ -0,0 +1,606 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import click
+import os
+import re
+import sys
+from ooinstall import openshift_ansible
+from ooinstall import OOConfig
+from ooinstall.oo_config import Host
+from ooinstall.variants import find_variant, get_variant_version_combos
+
+DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
+DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
+
+def validate_ansible_dir(path):
+ if not path:
+ raise click.BadParameter('An ansible path must be provided')
+ return path
+ # if not os.path.exists(path)):
+ # raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
+
+def is_valid_hostname(hostname):
+ if not hostname or len(hostname) > 255:
+ return False
+ if hostname[-1] == ".":
+ hostname = hostname[:-1] # strip exactly one dot from the right, if present
+ allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
+ return all(allowed.match(x) for x in hostname.split("."))
+
+def validate_prompt_hostname(hostname):
+ if '' == hostname or is_valid_hostname(hostname):
+ return hostname
+ raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
+ 'Please double-check this value ' \
+ 'and re-enter it.'.format(hostname))
+
+def get_ansible_ssh_user():
+ click.clear()
+ message = """
+This installation process will involve connecting to remote hosts via ssh. Any
+account may be used; however, if a non-root account is used it must have
+passwordless sudo access.
+"""
+ click.echo(message)
+ return click.prompt('User for ssh access', default='root')
+
+def list_hosts(hosts):
+ hosts_idx = range(len(hosts))
+ for idx in hosts_idx:
+ click.echo(' {}: {}'.format(idx, hosts[idx]))
+
+def delete_hosts(hosts):
+ while True:
+ list_hosts(hosts)
+ del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
+ 'or n/N to add more hosts', default='n')
+ try:
+ del_idx = int(del_idx)
+ hosts.remove(hosts[del_idx])
+ except IndexError:
+ click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
+ except ValueError:
+ try:
+ response = del_idx.lower()
+ if response in ['y', 'n']:
+ return hosts, response
+ click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
+ except AttributeError:
+ click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
+ return hosts, None
+
+def collect_hosts():
+ """
+ Collect host information from user. This will later be filled in using
+ ansible.
+
+ Returns: a list of host information collected from the user
+ """
+ click.clear()
+ click.echo('***Host Configuration***')
+ message = """
+The OpenShift Master serves the API and web console. It also coordinates the
+jobs that have to run across the environment. It can even run the datastore.
+For wizard based installations the database will be embedded. It's possible to
+change this later using etcd from Red Hat Enterprise Linux 7.
+
+Any Masters configured as part of this installation process will also be
+configured as Nodes. This is so that the Master will be able to proxy to Pods
+from the API. By default this Node will be unschedulable, but this can be changed
+after installation with 'oadm manage-node'.
+
+The OpenShift Node provides the runtime environments for containers. It will
+host the required services to be managed by the Master.
+
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
+ """
+ click.echo(message)
+
+ hosts = []
+ more_hosts = True
+ while more_hosts:
+ host_props = {}
+ hostname_or_ip = click.prompt('Enter hostname or IP address:',
+ default='',
+ value_proc=validate_prompt_hostname)
+
+ host_props['connect_to'] = hostname_or_ip
+
+ host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
+ host_props['node'] = True
+
+ #TODO: Reenable this option once container installs are out of tech preview
+ #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ # type=click.Choice(['rpm', 'container']),
+ # default='rpm')
+ #if rpm_or_container == 'container':
+ # host_props['containerized'] = True
+ #else:
+ # host_props['containerized'] = False
+ host_props['containerized'] = False
+
+ host = Host(**host_props)
+
+ hosts.append(host)
+
+ more_hosts = click.confirm('Do you want to add additional hosts?')
+ return hosts
+
+def confirm_hosts_facts(oo_cfg, callback_facts):
+ hosts = oo_cfg.hosts
+ click.clear()
+ message = """
+A list of the facts gathered from the provided hosts follows. Because it is
+often the case that the hostname for a system inside the cluster is different
+from the hostname that is resolvable from the command line or web clients,
+these settings cannot be validated automatically.
+
+For some cloud providers the installer is able to gather metadata exposed in
+the instance so reasonable defaults will be provided.
+
+Please confirm that they are correct before moving forward.
+
+"""
+ notes = """
+Format:
+
+connect_to,IP,public IP,hostname,public hostname
+
+Notes:
+ * The installation host is the hostname from the installer's perspective.
+ * The IP of the host should be the internal IP of the instance.
+ * The public IP should be the externally accessible IP associated with the instance
+ * The hostname should resolve to the internal IP from the instances
+ themselves.
+ * The public hostname should resolve to the external IP from hosts outside of
+ the cloud.
+"""
+
+ # For testing purposes we need to click.echo only once, so build up
+ # the message:
+ output = message
+
+ default_facts_lines = []
+ default_facts = {}
+ for h in hosts:
+ default_facts[h.connect_to] = {}
+ h.ip = callback_facts[h.connect_to]["common"]["ip"]
+ h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+ h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
+ h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+
+ default_facts_lines.append(",".join([h.connect_to,
+ h.ip,
+ h.public_ip,
+ h.hostname,
+ h.public_hostname]))
+ output = "%s\n%s" % (output, ",".join([h.connect_to,
+ h.ip,
+ h.public_ip,
+ h.hostname,
+ h.public_hostname]))
+
+ output = "%s\n%s" % (output, notes)
+ click.echo(output)
+ facts_confirmed = click.confirm("Do the above facts look correct?")
+ if not facts_confirmed:
+ message = """
+Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
+""" % oo_cfg.config_path
+ click.echo(message)
+ # Make sure we actually write out the config file.
+ oo_cfg.save_to_disk()
+ sys.exit(0)
+ return default_facts
+
+def get_variant_and_version():
+ message = "\nWhich variant would you like to install?\n\n"
+
+ i = 1
+ combos = get_variant_version_combos()
+ for (variant, version) in combos:
+ message = "%s\n(%s) %s %s" % (message, i, variant.description,
+ version.name)
+ i = i + 1
+
+ click.echo(message)
+ response = click.prompt("Choose a variant from above: ", default=1)
+ product, version = combos[response - 1]
+
+ return product, version
+
+def confirm_continue(message):
+ click.echo(message)
+ click.confirm("Are you ready to continue?", default=False, abort=True)
+ return
+
+def error_if_missing_info(oo_cfg):
+ missing_info = False
+ if not oo_cfg.hosts:
+ missing_info = True
+ click.echo('For unattended installs, hosts must be specified on the '
+ 'command line or in the config file: %s' % oo_cfg.config_path)
+ sys.exit(1)
+
+ if 'ansible_ssh_user' not in oo_cfg.settings:
+ click.echo("Must specify ansible_ssh_user in configuration file.")
+ sys.exit(1)
+
+ # Lookup a variant based on the key we were given:
+ if not oo_cfg.settings['variant']:
+ click.echo("No variant specified in configuration file.")
+ sys.exit(1)
+
+ ver = None
+ if 'variant_version' in oo_cfg.settings:
+ ver = oo_cfg.settings['variant_version']
+ variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
+ if variant is None or version is None:
+ err_variant_name = oo_cfg.settings['variant']
+ if ver:
+ err_variant_name = "%s %s" % (err_variant_name, ver)
+ click.echo("%s is not an installable variant." % err_variant_name)
+ sys.exit(1)
+ oo_cfg.settings['variant_version'] = version.name
+
+ missing_facts = oo_cfg.calc_missing_facts()
+ if len(missing_facts) > 0:
+ missing_info = True
+ click.echo('For unattended installs, facts must be provided for all masters/nodes:')
+ for host in missing_facts:
+ click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
+
+ if missing_info:
+ sys.exit(1)
+
+
+def get_missing_info_from_user(oo_cfg):
+ """ Prompts the user for any information missing from the given configuration. """
+ click.clear()
+
+ message = """
+Welcome to the OpenShift Enterprise 3 installation.
+
+Please confirm that following prerequisites have been met:
+
+* All systems where OpenShift will be installed are running Red Hat Enterprise
+ Linux 7.
+* All systems are properly subscribed to the required OpenShift Enterprise 3
+ repositories.
+* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
+* All systems have working DNS that resolves not only from the perspective of
+ the installer but also from within the cluster.
+
+When the process completes you will have a default configuration for Masters
+and Nodes. For ongoing environment maintenance it's recommended that the
+official Ansible playbooks be used.
+
+For more information on installation prerequisites please see:
+https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
+"""
+ confirm_continue(message)
+ click.clear()
+
+ if oo_cfg.settings.get('ansible_ssh_user', '') == '':
+ oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
+ click.clear()
+
+ if not oo_cfg.hosts:
+ oo_cfg.hosts = collect_hosts()
+ click.clear()
+
+ if oo_cfg.settings.get('variant', '') == '':
+ variant, version = get_variant_and_version()
+ oo_cfg.settings['variant'] = variant.name
+ oo_cfg.settings['variant_version'] = version.name
+ click.clear()
+
+ return oo_cfg
+
+
+def collect_new_nodes():
+ click.clear()
+ click.echo('***New Node Configuration***')
+ message = """
+Add new nodes here
+ """
+ click.echo(message)
+ return collect_hosts()
+
+def get_installed_hosts(hosts, callback_facts):
+ installed_hosts = []
+ for host in hosts:
+ if(host.connect_to in callback_facts.keys()
+ and 'common' in callback_facts[host.connect_to].keys()
+ and callback_facts[host.connect_to]['common'].get('version', '')
+ and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
+ installed_hosts.append(host)
+ return installed_hosts
+
+# pylint: disable=too-many-branches
+# This pylint error will be corrected shortly in separate PR.
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
+
+ # Copy the list of existing hosts so we can remove any already installed nodes.
+ hosts_to_run_on = list(oo_cfg.hosts)
+
+ # Check if master or nodes already have something installed
+ installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
+ if len(installed_hosts) > 0:
+ click.echo('Installed environment detected.')
+ # This check has to happen before we start removing hosts later in this method
+ if not force:
+ if not unattended:
+ click.echo('By default the installer only adds new nodes to an installed environment.')
+ response = click.prompt('Do you want to (1) only add additional nodes or ' \
+ '(2) reinstall the existing hosts ' \
+ 'potentially erasing any custom changes?',
+ type=int)
+ # TODO: this should be reworked with error handling.
+ # Click can certainly do this for us.
+ # This should be refactored as soon as we add a 3rd option.
+ if response == 1:
+ force = False
+ if response == 2:
+ force = True
+
+ # present a message listing already installed hosts and remove hosts if needed
+ for host in installed_hosts:
+ if host.master:
+ click.echo("{} is already an OpenShift Master".format(host))
+ # Masters stay in the list, we need to run against them when adding
+ # new nodes.
+ elif host.node:
+ click.echo("{} is already an OpenShift Node".format(host))
+ # force is only used for reinstalls so we don't want to remove
+ # anything.
+ if not force:
+ hosts_to_run_on.remove(host)
+
+ # Handle the cases where we know about uninstalled systems
+ new_hosts = set(hosts_to_run_on) - set(installed_hosts)
+ if len(new_hosts) > 0:
+ for new_host in new_hosts:
+ click.echo("{} is currently uninstalled".format(new_host))
+
+ # Fall through
+ click.echo('Adding additional nodes...')
+ else:
+ if unattended:
+ if not force:
+ click.echo('Installed environment detected and no additional nodes specified: ' \
+ 'aborting. If you want a fresh install, use ' \
+ '`atomic-openshift-installer install --force`')
+ sys.exit(1)
+ else:
+ if not force:
+ new_nodes = collect_new_nodes()
+
+ hosts_to_run_on.extend(new_nodes)
+ oo_cfg.hosts.extend(new_nodes)
+
+ openshift_ansible.set_config(oo_cfg)
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+ else:
+ pass # proceeding as normal should do a clean install
+
+ return hosts_to_run_on, callback_facts
+
+
+@click.group()
+@click.pass_context
+@click.option('--unattended', '-u', is_flag=True, default=False)
+@click.option('--configuration', '-c',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-playbook-directory',
+ '-a',
+ type=click.Path(exists=True,
+ file_okay=False,
+ dir_okay=True,
+ readable=True),
+ # callback=validate_ansible_dir,
+ default=DEFAULT_PLAYBOOK_DIR,
+ envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
+@click.option('--ansible-config',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-log-path',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+ is_flag=True, default=False)
+#pylint: disable=too-many-arguments
+# Main CLI entrypoint, not much we can do about too many arguments.
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
+ """
+ atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run the installation on each host.
+ It can also be run in unattended mode if provided with a configuration file.
+
+ Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
+ """
+ ctx.obj = {}
+ ctx.obj['unattended'] = unattended
+ ctx.obj['configuration'] = configuration
+ ctx.obj['ansible_config'] = ansible_config
+ ctx.obj['ansible_log_path'] = ansible_log_path
+ ctx.obj['verbose'] = verbose
+
+ oo_cfg = OOConfig(ctx.obj['configuration'])
+
+ # If no playbook dir on the CLI, check the config:
+ if not ansible_playbook_directory:
+ ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
+ # If still no playbook dir, check for the default location:
+ if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
+ ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
+ validate_ansible_dir(ansible_playbook_directory)
+ oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
+ oo_cfg.ansible_playbook_directory = ansible_playbook_directory
+ ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
+
+ if ctx.obj['ansible_config']:
+ oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
+ elif os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ # If we're installed by RPM this file should exist and we can use it as our default:
+ oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
+
+ oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
+
+ ctx.obj['oo_cfg'] = oo_cfg
+ openshift_ansible.set_config(oo_cfg)
+
+
+@click.command()
+@click.pass_context
+def uninstall(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if len(oo_cfg.hosts) == 0:
+ click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
+ sys.exit(1)
+
+ click.echo("OpenShift will be uninstalled from the following hosts:\n")
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ for host in oo_cfg.hosts:
+ click.echo(" * %s" % host.connect_to)
+ proceed = click.confirm("\nDo you wish to proceed?")
+ if not proceed:
+ click.echo("Uninstall cancelled.")
+ sys.exit(0)
+
+ openshift_ansible.run_uninstall_playbook(verbose)
+
+
+@click.command()
+@click.pass_context
+def upgrade(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if len(oo_cfg.hosts) == 0:
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+
+ # Update config to reflect the version we're targeting; we'll write
+ # to disk once ansible completes successfully, not before.
+ old_variant = oo_cfg.settings['variant']
+ old_version = oo_cfg.settings['variant_version']
+ if oo_cfg.settings['variant'] == 'enterprise':
+ oo_cfg.settings['variant'] = 'openshift-enterprise'
+ version = find_variant(oo_cfg.settings['variant'])[1]
+ oo_cfg.settings['variant_version'] = version.name
+ click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
+ old_variant, old_version, oo_cfg.settings['variant'],
+ oo_cfg.settings['variant_version']))
+ for host in oo_cfg.hosts:
+ click.echo(" * %s" % host.connect_to)
+
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ proceed = click.confirm("\nDo you wish to proceed?")
+ if not proceed:
+ click.echo("Upgrade cancelled.")
+ sys.exit(0)
+
+ retcode = openshift_ansible.run_upgrade_playbook(verbose)
+ if retcode > 0:
+ click.echo("Errors encountered during upgrade, please check %s." %
+ oo_cfg.settings['ansible_log_path'])
+ else:
+ oo_cfg.save_to_disk()
+ click.echo("Upgrade completed! Rebooting all hosts is recommended.")
+
+
+@click.command()
+@click.option('--force', '-f', is_flag=True, default=False)
+@click.pass_context
+def install(ctx, force):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if ctx.obj['unattended']:
+ error_if_missing_info(oo_cfg)
+ else:
+ oo_cfg = get_missing_info_from_user(oo_cfg)
+
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+ verbose)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(
+ oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
+
+ click.echo('Writing config to: %s' % oo_cfg.config_path)
+
+ # We already verified this is not the case for unattended installs, so this can
+ # only trigger for live CLI users:
+ # TODO: if there are *new* nodes and this is a live install, we may need the user
+ # to confirm the settings for new nodes. Look into this once we're distinguishing
+ # between new and pre-existing nodes.
+ if len(oo_cfg.calc_missing_facts()) > 0:
+ confirm_hosts_facts(oo_cfg, callback_facts)
+
+ oo_cfg.save_to_disk()
+
+ click.echo('Ready to run installation process.')
+ message = """
+If changes are needed to the values recorded by the installer please update {}.
+""".format(oo_cfg.config_path)
+ if not ctx.obj['unattended']:
+ confirm_continue(message)
+
+ error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
+ hosts_to_run_on, verbose)
+ if error:
+ # The bootstrap script will print out the log location.
+ message = """
+An error was detected. After resolving the problem please relaunch the
+installation process.
+"""
+ click.echo(message)
+ sys.exit(1)
+ else:
+ message = """
+The installation was successful!
+
+If this is your first time installing please take a look at the Administrator
+Guide for advanced options related to routing, storage, authentication and much
+more:
+
+http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
+"""
+ click.echo(message)
+ click.pause()
+
+cli.add_command(install)
+cli.add_command(upgrade)
+cli.add_command(uninstall)
+
+if __name__ == '__main__':
+ # This is expected behaviour for context passing with click library:
+ # pylint: disable=unexpected-keyword-arg
+ cli(obj={})
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
new file mode 100644
index 000000000..9c97e6e93
--- /dev/null
+++ b/utils/src/ooinstall/oo_config.py
@@ -0,0 +1,218 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
+
+import os
+import yaml
+from pkg_resources import resource_filename
+
+PERSIST_SETTINGS = [
+ 'ansible_ssh_user',
+ 'ansible_config',
+ 'ansible_log_path',
+ 'variant',
+ 'variant_version',
+ 'version',
+ ]
+REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
+
+
+class OOConfigFileError(Exception):
+ """The provided config file path can't be read/written
+ """
+ pass
+
+
+class OOConfigInvalidHostError(Exception):
+ """ Host in config is missing both ip and hostname. """
+ pass
+
+
+class Host(object):
+ """ A system we will or have installed OpenShift on. """
+ def __init__(self, **kwargs):
+ self.ip = kwargs.get('ip', None)
+ self.hostname = kwargs.get('hostname', None)
+ self.public_ip = kwargs.get('public_ip', None)
+ self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
+
+ # Should this host run as an OpenShift master:
+ self.master = kwargs.get('master', False)
+
+ # Should this host run as an OpenShift node:
+ self.node = kwargs.get('node', False)
+ self.containerized = kwargs.get('containerized', False)
+
+ if self.connect_to is None:
+ raise OOConfigInvalidHostError("You must specify either and 'ip' " \
+ "or 'hostname' to connect to.")
+
+ if self.master is False and self.node is False:
+ raise OOConfigInvalidHostError(
+ "You must specify each host as either a master or a node.")
+
+ def __str__(self):
+ return self.connect_to
+
+ def __repr__(self):
+ return self.connect_to
+
+ def to_dict(self):
+ """ Used when exporting to yaml. """
+ d = {}
+ for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
+ 'master', 'node', 'containerized', 'connect_to']:
+ # If the property is defined (not None or False), export it:
+ if getattr(self, prop):
+ d[prop] = getattr(self, prop)
+ return d
+
+
+class OOConfig(object):
+ default_dir = os.path.normpath(
+ os.environ.get('XDG_CONFIG_HOME',
+ os.environ['HOME'] + '/.config/') + '/openshift/')
+ default_file = '/installer.cfg.yml'
+
+ def __init__(self, config_path):
+ if config_path:
+ self.config_path = os.path.normpath(config_path)
+ else:
+ self.config_path = os.path.normpath(self.default_dir +
+ self.default_file)
+ self.settings = {}
+ self._read_config()
+ self._set_defaults()
+
+ def _read_config(self):
+ self.hosts = []
+ try:
+ if os.path.exists(self.config_path):
+ cfgfile = open(self.config_path, 'r')
+ self.settings = yaml.safe_load(cfgfile.read())
+ cfgfile.close()
+
+ # Use the presence of a Description as an indicator this is
+ # a legacy config file:
+ if 'Description' in self.settings:
+ self._upgrade_legacy_config()
+
+ # Parse the hosts into DTO objects:
+ if 'hosts' in self.settings:
+ for host in self.settings['hosts']:
+ self.hosts.append(Host(**host))
+
+ # Watch out for the variant_version coming in as a float:
+ if 'variant_version' in self.settings:
+ self.settings['variant_version'] = \
+ str(self.settings['variant_version'])
+
+ except IOError, ferr:
+ raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
+ ferr.strerror))
+ except yaml.scanner.ScannerError:
+ raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
+
+ def _upgrade_legacy_config(self):
+ new_hosts = []
+ remove_settings = ['validated_facts', 'Description', 'Name',
+ 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
+
+ if 'validated_facts' in self.settings:
+ for key, value in self.settings['validated_facts'].iteritems():
+ value['connect_to'] = key
+ if 'masters' in self.settings and key in self.settings['masters']:
+ value['master'] = True
+ if 'nodes' in self.settings and key in self.settings['nodes']:
+ value['node'] = True
+ new_hosts.append(value)
+ self.settings['hosts'] = new_hosts
+
+ for s in remove_settings:
+ if s in self.settings:
+ del self.settings[s]
+
+ # A legacy config implies openshift-enterprise 3.0:
+ self.settings['variant'] = 'openshift-enterprise'
+ self.settings['variant_version'] = '3.0'
+
+ def _set_defaults(self):
+
+ if 'ansible_inventory_directory' not in self.settings:
+ self.settings['ansible_inventory_directory'] = \
+ self._default_ansible_inv_dir()
+ if not os.path.exists(self.settings['ansible_inventory_directory']):
+ os.makedirs(self.settings['ansible_inventory_directory'])
+ if 'ansible_plugins_directory' not in self.settings:
+ self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
+ if 'version' not in self.settings:
+ self.settings['version'] = 'v1'
+
+ if 'ansible_callback_facts_yaml' not in self.settings:
+ self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
+ self.settings['ansible_inventory_directory']
+
+ if 'ansible_ssh_user' not in self.settings:
+ self.settings['ansible_ssh_user'] = ''
+
+ self.settings['ansible_inventory_path'] = '{}/hosts'.format(self.settings['ansible_inventory_directory'])
+
+ # clean up any empty sets
+ for setting in self.settings.keys():
+ if not self.settings[setting]:
+ self.settings.pop(setting)
+
+ def _default_ansible_inv_dir(self):
+ return os.path.normpath(
+ os.path.dirname(self.config_path) + "/.ansible")
+
+ def calc_missing_facts(self):
+ """
+ Determine which host facts are not defined in the config.
+
+ Returns a hash of host to a list of the missing facts.
+ """
+ result = {}
+
+ for host in self.hosts:
+ missing_facts = []
+ for required_fact in REQUIRED_FACTS:
+ if not getattr(host, required_fact):
+ missing_facts.append(required_fact)
+ if len(missing_facts) > 0:
+ result[host.connect_to] = missing_facts
+ return result
+
+ def save_to_disk(self):
+ out_file = open(self.config_path, 'w')
+ out_file.write(self.yaml())
+ out_file.close()
+
+ def persist_settings(self):
+ p_settings = {}
+ for setting in PERSIST_SETTINGS:
+ if setting in self.settings and self.settings[setting]:
+ p_settings[setting] = self.settings[setting]
+ p_settings['hosts'] = []
+ for host in self.hosts:
+ p_settings['hosts'].append(host.to_dict())
+
+ if self.settings['ansible_inventory_directory'] != \
+ self._default_ansible_inv_dir():
+ p_settings['ansible_inventory_directory'] = \
+ self.settings['ansible_inventory_directory']
+
+ return p_settings
+
+ def yaml(self):
+ return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
+
+ def __str__(self):
+ return self.yaml()
+
+ def get_host(self, name):
+ for host in self.hosts:
+ if host.connect_to == name:
+ return host
+ return None
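A brief usage sketch for the class above (passing None falls back to ~/.config/openshift/installer.cfg.yml, per the defaults in __init__):
```
from ooinstall.oo_config import OOConfig

# Load the default config, report anything the unattended install path
# would reject, and write the normalized settings back out.
cfg = OOConfig(None)
missing = cfg.calc_missing_facts()
for host, facts in missing.items():
    print('%s is missing facts: %s' % (host, ', '.join(facts)))
if not missing:
    cfg.save_to_disk()
```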
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
new file mode 100644
index 000000000..fdd0c1168
--- /dev/null
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -0,0 +1,179 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
+
+import socket
+import subprocess
+import sys
+import os
+import yaml
+from ooinstall.variants import find_variant
+
+CFG = None
+
+def set_config(cfg):
+ global CFG
+ CFG = cfg
+
+def generate_inventory(hosts):
+ global CFG
+
+ base_inventory_path = CFG.settings['ansible_inventory_path']
+ base_inventory = open(base_inventory_path, 'w')
+ base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
+ base_inventory.write('\n[OSEv3:vars]\n')
+ base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+ if CFG.settings['ansible_ssh_user'] != 'root':
+ base_inventory.write('ansible_become=true\n')
+
+ # Find the correct deployment type for ansible:
+ ver = find_variant(CFG.settings['variant'],
+ version=CFG.settings.get('variant_version', None))[1]
+ base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
+
+ if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_additional_registries={}\n'
+ .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_insecure_registries={}\n'
+ .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+ # We have to double the '{' here for literals
+ base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
+ "'name': 'ose-devel', "
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
+
+ base_inventory.write('\n[masters]\n')
+ masters = [host for host in hosts if host.master]
+ for master in masters:
+ write_host(master, base_inventory)
+ base_inventory.write('\n[nodes]\n')
+ nodes = [host for host in hosts if host.node]
+ for node in nodes:
+ # TODO: Until the Master can run the SDN itself we have to configure the Masters
+ # as Nodes too.
+ scheduleable = True
+ # If there's only one Node and it's also a Master we want it to be scheduleable:
+ if node in masters and len(masters) != 1:
+ scheduleable = False
+ write_host(node, base_inventory, scheduleable)
+ base_inventory.close()
+ return base_inventory_path
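+
+# With the defaults, the generated inventory is plain INI and looks roughly
+# like this (illustrative values only):
+#
+#   [OSEv3:children]
+#   masters
+#   nodes
+#
+#   [OSEv3:vars]
+#   ansible_ssh_user=root
+#   deployment_type=openshift-enterprise
+#
+#   [masters]
+#   10.0.0.1 openshift_ip=10.0.0.1 openshift_hostname=master.example.com
+#
+#   [nodes]
+#   10.0.0.2 openshift_ip=10.0.0.2 openshift_hostname=node1.example.com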
+
+
+def write_host(host, inventory, scheduleable=True):
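+ """
+ Write a single inventory line for the host, appending any facts known for
+ it (plus an ansible_connection/ansible_become override when the installer
+ is running on that host).
+ """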
+ global CFG
+
+ facts = ''
+ if host.ip:
+ facts += ' openshift_ip={}'.format(host.ip)
+ if host.public_ip:
+ facts += ' openshift_public_ip={}'.format(host.public_ip)
+ if host.hostname:
+ facts += ' openshift_hostname={}'.format(host.hostname)
+ if host.public_hostname:
+ facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+ # TODO: For now write_host handles both masters and nodes.
+ # Technically only nodes will ever need this.
+ if not scheduleable:
+ facts += ' openshift_scheduleable=False'
+ installer_host = socket.gethostname()
+ if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
+ facts += ' ansible_connection=local'
+ if os.geteuid() != 0:
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
+ if no_pwd_sudo == 1:
+ print 'The atomic-openshift-installer requires sudo access without a password.'
+ sys.exit(1)
+ facts += ' ansible_become=true'
+
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
+
+
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
+ """
+ Retrieves system facts from the remote systems.
+ """
+ FNULL = open(os.devnull, 'w')
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path])
+ status = subprocess.call(args, env=env_vars, stdout=FNULL)
+ if status != 0:
+ return [], 1
+ callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
+ callback_facts = yaml.safe_load(callback_facts_file)
+ callback_facts_file.close()
+ return callback_facts, 0
+
+
+def default_facts(hosts, verbose=False):
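+ """
+ Generate an inventory for the given hosts and run the openshift_facts
+ playbook against it to gather the current system facts.
+ """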
+ global CFG
+ inventory_file = generate_inventory(hosts)
+ os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
+
+ facts_env = os.environ.copy()
+ facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
+ facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+ if 'ansible_log_path' in CFG.settings:
+ facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
+
+
+def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
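+ """
+ Run the main installation playbook: the scaleup playbook when only a
+ subset of the known hosts is being installed, otherwise the full byo
+ config playbook.
+ """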
+ global CFG
+ inventory_file = generate_inventory(hosts_to_run_on)
+ if len(hosts_to_run_on) != len(hosts):
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/common/openshift-cluster/scaleup.yml')
+ else:
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/byo/config.yml')
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
+
+
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory),
+ playbook])
+ return subprocess.call(args, env=env_vars)
+
+
+def run_uninstall_playbook(verbose=False):
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/adhoc/uninstall.yml')
+ inventory_file = generate_inventory(CFG.hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
+
+
+def run_upgrade_playbook(verbose=False):
+ # TODO: do not hardcode the upgrade playbook, add ability to select the
+ # right playbook depending on the type of upgrade.
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml')
+ # TODO: Upgrade inventory for upgrade?
+ inventory_file = generate_inventory(CFG.hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
+
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
new file mode 100644
index 000000000..3bb61dddb
--- /dev/null
+++ b/utils/src/ooinstall/variants.py
@@ -0,0 +1,77 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-few-public-methods
+
+"""
+Defines the variants and versions the installer supports, along with the
+metadata required to run Ansible correctly.
+
+This module needs to be updated for each major release to allow the new version
+to be specified by the user, and to point the generic variants to the latest
+version.
+"""
+
+
+class Version(object):
+ def __init__(self, name, ansible_key):
+ self.name = name # e.g. 3.0, 3.1
+
+ self.ansible_key = ansible_key
+
+
+class Variant(object):
+ def __init__(self, name, description, versions):
+ # Supported variant name:
+ self.name = name
+
+ # Friendly name for the variant:
+ self.description = description
+
+ self.versions = versions
+
+ def latest_version(self):
+ return self.versions[-1]
+
+
+# WARNING: Keep the versions ordered, most recent last:
+OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
+ [
+ Version('3.0', 'enterprise'),
+ Version('3.1', 'openshift-enterprise')
+ ]
+)
+
+AEP = Variant('atomic-enterprise', 'Atomic Enterprise Platform',
+ [
+ Version('3.1', 'atomic-enterprise')
+ ]
+)
+
+# Ordered list of variants we can install, first is the default.
+SUPPORTED_VARIANTS = (OSE, AEP)
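+
+# As the module docstring notes, each new release is registered above; a
+# hypothetical future version would be appended to its variant's list, e.g.
+# Version('3.2', 'openshift-enterprise') (illustrative only, not a shipped entry).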
+
+
+def find_variant(name, version=None):
+ """
+ Locate the variant object for the variant given in the config file, and
+ the correct version to use for it.
+ Return (None, None) if we can't find a match.
+ """
+ prod = None
+ for prod in SUPPORTED_VARIANTS:
+ if prod.name == name:
+ if version is None:
+ return (prod, prod.latest_version())
+ for v in prod.versions:
+ if v.name == version:
+ return (prod, v)
+
+ return (None, None)
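+# Example (illustrative): find_variant('openshift-enterprise') pairs the OSE
+# variant with its latest version, while an unknown name yields (None, None).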
+
+def get_variant_version_combos():
+ combos = []
+ for variant in SUPPORTED_VARIANTS:
+ for ver in variant.versions:
+ combos.append((variant, ver))
+ return combos
+
diff --git a/utils/test/__init__.py b/utils/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/__init__.py
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
new file mode 100644
index 000000000..fc16d9ceb
--- /dev/null
+++ b/utils/test/cli_installer_tests.py
@@ -0,0 +1,629 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+
+import copy
+import os
+import ConfigParser
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from click.testing import CliRunner
+from test.oo_config_tests import OOInstallFixture
+from mock import patch
+
+
+MOCK_FACTS = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+}
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+ # Add any arguments you would like to test here; the defaults ensure
+ # we only do unattended invocations here, using temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.cli, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exception is not None or result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _read_yaml(self, config_file_path):
+ f = open(config_file_path, 'r')
+ config = yaml.safe_load(f.read())
+ f.close()
+ return config
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ print written_config['hosts']
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for h in written_config['hosts']:
+ self.assertTrue(h['node'])
+ self.assertTrue('ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('public_hostname' in h)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in its logic. The goal of this helper is to handle
+ all the messy setup so the main test cases stay easy to read. The basic
+ idea is to modify mock_facts to return a version indicating OpenShift
+ is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = self._read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+class UnattendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ self.cli_args.append("-u")
+
+ # unattended with config file and all installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ if result.exception is None or result.exit_code != 1:
+ print "Exit code: %s" % result.exit_code
+ self.fail("Unexpected CLI return")
+
+ # unattended with config file and all installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=False)
+
+ # unattended with config file and no installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and some installed, some uninstalled hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
+ # unattended with config file and some installed, some uninstalled hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(3, len(hosts))
+ self.assertEquals(3, len(hosts_to_run_on))
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_inventory_write(self, load_facts_mock, run_playbook_mock):
+
+ # Add an ssh user so we can verify it makes it to the inventory file:
+ merged_config = "%s\n%s" % (SAMPLE_CONFIG % 'openshift-enterprise',
+ "ansible_ssh_user: bob")
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), merged_config)
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Check the inventory file looks as we would expect:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('bob',
+ inventory.get('OSEv3:vars', 'ansible_ssh_user'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ # Check the masters:
+ self.assertEquals(1, len(inventory.items('masters')))
+ self.assertEquals(3, len(inventory.items('nodes')))
+
+ for item in inventory.items('masters'):
+ # ansible host lines do NOT parse nicely:
+ master_line = item[0]
+ if item[1] is not None:
+ master_line = "%s=%s" % (master_line, item[1])
+ self.assertTrue('openshift_ip' in master_line)
+ self.assertTrue('openshift_public_ip' in master_line)
+ self.assertTrue('openshift_hostname' in master_line)
+ self.assertTrue('openshift_public_hostname' in master_line)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_variant_version_latest_assumed(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = self._read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # We didn't specify a version so the latest should have been assumed,
+ # and written to disk:
+ self.assertEquals('3.1', written_config['variant_version'])
+
+ # Make sure the correct value was passed to ansible:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_variant_version_preserved(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ config = '%s\n%s' % (config, 'variant_version: 3.0')
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), config)
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = self._read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # Make sure our older version was preserved and written to disk:
+ self.assertEquals('3.0', written_config['variant_version'])
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ @patch('ooinstall.openshift_ansible.run_ansible')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+
+ self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ config, None, None)
+
+ @patch('ooinstall.openshift_ansible.run_ansible')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+
+ self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ config, ansible_config, ansible_config)
+
+ @patch('ooinstall.openshift_ansible.run_ansible')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ansible_config_specified_in_installer_config(self,
+ load_facts_mock, run_ansible_mock):
+
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ config = "%s\nansible_config: %s" % (config, ansible_config)
+ self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ config, None, ansible_config)
+
+ #pylint: disable=too-many-arguments
+ # This helper makes the tests drastically simpler to write, and the args
+ # are all useful.
+ def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
+ installer_config, ansible_config_cli=None, expected_result=None):
+ """
+ Utility method for testing the ways you can specify the ansible config.
+ """
+
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), installer_config)
+
+ self.cli_args.extend(["-c", config_file])
+ if ansible_config_cli:
+ self.cli_args.extend(["--ansible-config", ansible_config_cli])
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Test the env vars for facts playbook:
+ facts_env_vars = load_facts_mock.call_args[0][2]
+ if expected_result:
+ self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
+ else:
+ self.assertFalse('ANSIBLE_CONFIG' in facts_env_vars)
+
+ # Test the env vars for main playbook:
+ env_vars = run_ansible_mock.call_args[0][2]
+ if expected_result:
+ self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
+ else:
+ self.assertFalse('ANSIBLE_CONFIG' in env_vars)
+
+
+class AttendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ # Doesn't exist but keeps us from reading the local user's config:
+ self.config_file = os.path.join(self.work_dir, 'config.yml')
+ self.cli_args.extend(["-c", self.config_file])
+
+ #pylint: disable=too-many-arguments
+ def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None):
+ """
+ Builds a CLI input string with newline characters to simulate
+ the full run.
+ This gives us only one place to update when the input prompts change.
+ """
+
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if hosts:
+ i = 0
+ for (host, is_master) in hosts:
+ inputs.append(host)
+ inputs.append('y' if is_master else 'n')
+ #inputs.append('rpm')
+ if i < len(hosts) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ inputs.append('1') # Add more nodes
+ i = 0
+ for (host, is_master) in add_nodes:
+ inputs.append(host)
+ inputs.append('y' if is_master else 'n')
+ #inputs.append('rpm')
+ if i < len(add_nodes) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ inputs.extend([
+ confirm_facts,
+ 'y', # let's do this
+ ])
+
+ return '\n'.join(inputs)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', False),
+ ('10.0.0.3', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 3)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 3)
+
+ # interactive with config file and some installed, some uninstalled hosts
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_add_nodes(self, load_facts_mock, run_playbook_mock):
+
+ # Modify the mock facts to return a version indicating OpenShift
+ # is already installed on our master, and the first node.
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', False),
+ ],
+ add_nodes=[('10.0.0.3', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 2)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 3)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
+ cli_input = self._build_input(confirm_facts='y')
+ self.cli_args.extend(["-c", config_file])
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 3)
+
+ written_config = self._read_yaml(config_file)
+ self._verify_config_hosts(written_config, 3)
+
+ # interactive with config file and all installed hosts
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ],
+ add_nodes=[('10.0.0.2', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
+ run_playbook_mock,
+ cli_input,
+ exp_hosts_len=2,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
+# TODO: test with config file, attended add node
+# TODO: test with config file, attended new node already in config file
+# TODO: test with config file, attended new node already in config file, plus manually added nodes
+# TODO: test with config file, attended reject facts
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
new file mode 100644
index 000000000..0dd4a30e9
--- /dev/null
+++ b/utils/test/oo_config_tests.py
@@ -0,0 +1,228 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+
+import os
+import unittest
+import tempfile
+import shutil
+import yaml
+
+from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
+
+SAMPLE_CONFIG = """
+variant: openshift-enterprise
+ansible_ssh_user: root
+hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+# Used to test automatic upgrading of config:
+LEGACY_CONFIG = """
+Description: This is the configuration file for the OpenShift Ansible-Based Installer.
+Name: OpenShift Ansible-Based Installer Configuration
+Subscription: {type: none}
+Vendor: OpenShift Community
+Version: 0.0.1
+ansible_config: /tmp/notreal/ansible.cfg
+ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible
+ansible_log_path: /tmp/ansible.log
+ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins
+masters: [10.0.0.1]
+nodes: [10.0.0.2, 10.0.0.3]
+validated_facts:
+ 10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1}
+ 10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2}
+ 10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3}
+"""
+
+
+CONFIG_INCOMPLETE_FACTS = """
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: 24.222.0.2
+ public_ip: 24.222.0.2
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ node: true
+"""
+
+
+class OOInstallFixture(unittest.TestCase):
+
+ def setUp(self):
+ self.tempfiles = []
+ self.work_dir = tempfile.mkdtemp(prefix='ooconfigtests')
+ self.tempfiles.append(self.work_dir)
+
+ def tearDown(self):
+ for path in self.tempfiles:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+
+ def write_config(self, path, config_str):
+ """
+ Write given config to a temporary file which will be cleaned
+ up in teardown.
+ Returns full path to the file.
+ """
+ cfg_file = open(path, 'w')
+ cfg_file.write(config_str)
+ cfg_file.close()
+ return path
+
+
+class LegacyOOConfigTests(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), LEGACY_CONFIG)
+ self.cfg = OOConfig(self.cfg_path)
+
+ def test_load_config_memory(self):
+ self.assertEquals('openshift-enterprise', self.cfg.settings['variant'])
+ self.assertEquals('3.0', self.cfg.settings['variant_version'])
+ self.assertEquals('v1', self.cfg.settings['version'])
+
+ self.assertEquals(3, len(self.cfg.hosts))
+ h1 = self.cfg.get_host('10.0.0.1')
+ self.assertEquals('10.0.0.1', h1.ip)
+ self.assertEquals('24.222.0.1', h1.public_ip)
+ self.assertEquals('master-private.example.com', h1.hostname)
+ self.assertEquals('master.example.com', h1.public_hostname)
+
+ h2 = self.cfg.get_host('10.0.0.2')
+ self.assertEquals('10.0.0.2', h2.ip)
+ self.assertEquals('24.222.0.2', h2.public_ip)
+ self.assertEquals('node1-private.example.com', h2.hostname)
+ self.assertEquals('node1.example.com', h2.public_hostname)
+
+ h3 = self.cfg.get_host('10.0.0.3')
+ self.assertEquals('10.0.0.3', h3.ip)
+ self.assertEquals('24.222.0.3', h3.public_ip)
+ self.assertEquals('node2-private.example.com', h3.hostname)
+ self.assertEquals('node2.example.com', h3.public_hostname)
+
+ self.assertFalse('masters' in self.cfg.settings)
+ self.assertFalse('nodes' in self.cfg.settings)
+ self.assertFalse('Description' in self.cfg.settings)
+ self.assertFalse('Name' in self.cfg.settings)
+ self.assertFalse('Subscription' in self.cfg.settings)
+ self.assertFalse('Vendor' in self.cfg.settings)
+ self.assertFalse('Version' in self.cfg.settings)
+ self.assertFalse('validated_facts' in self.cfg.settings)
+
+
+class OOConfigTests(OOInstallFixture):
+
+ def test_load_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+
+ self.assertEquals(3, len(ooconfig.hosts))
+ self.assertEquals("master-private.example.com", ooconfig.hosts[0].connect_to)
+ self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip)
+ self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname)
+
+ self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
+ [host['ip'] for host in ooconfig.settings['hosts']])
+
+ self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+ self.assertEquals('v1', ooconfig.settings['version'])
+
+ def test_load_complete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(0, len(missing_host_facts))
+
+ # Test missing optional facts the user must confirm:
+ def test_load_host_incomplete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(2, len(missing_host_facts))
+ self.assertEquals(1, len(missing_host_facts['10.0.0.2']))
+ self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
+
+ def test_write_config(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ ooconfig.save_to_disk()
+
+ f = open(cfg_path, 'r')
+ written_config = yaml.safe_load(f.read())
+ f.close()
+
+ self.assertEquals(3, len(written_config['hosts']))
+ for h in written_config['hosts']:
+ self.assertTrue('ip' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_hostname' in h)
+
+ self.assertTrue('ansible_ssh_user' in written_config)
+ self.assertTrue('variant' in written_config)
+ self.assertEquals('v1', written_config['version'])
+
+ # Some advanced settings should not get written out if they
+ # were not specified by the user:
+ self.assertFalse('ansible_inventory_directory' in written_config)
+
+
+class HostTests(OOInstallFixture):
+
+ def test_load_host_no_ip_or_hostname(self):
+ yaml_props = {
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'master': True
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_load_host_no_master_or_node_specified(self):
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+
+
+
diff --git a/utils/workflows/enterprise_deploy/openshift.sh b/utils/workflows/enterprise_deploy/openshift.sh
new file mode 100644
index 000000000..040a9a84d
--- /dev/null
+++ b/utils/workflows/enterprise_deploy/openshift.sh
@@ -0,0 +1,2 @@
+# This file is not used for OpenShift 3.0. It's merely an artifact of the the
+# installation framework originally used for OpenShift 2.x.