-rw-r--r--  README_AWS.md | 43
-rw-r--r--  README_OSE.md | 2
-rw-r--r--  README_origin.md | 9
-rw-r--r--  README_vagrant.md | 30
-rw-r--r--  Vagrantfile | 41
-rwxr-xr-x  bin/cluster | 15
l---------  bin/openshift_ansible/aws | 1
-rwxr-xr-x  bin/ossh_bash_completion | 21
-rw-r--r--  bin/ossh_zsh_completion | 13
-rw-r--r--  docs/best_practices_guide.adoc | 43
-rw-r--r--  docs/style_guide.adoc | 2
-rw-r--r--  filter_plugins/oo_filters.py | 22
-rw-r--r--  filter_plugins/oo_zabbix_filters.py | 79
-rw-r--r--  git/.pylintrc | 6
-rwxr-xr-x  git/pylint.sh | 6
-rw-r--r--  inventory/aws/hosts/hosts | 2
-rw-r--r--  inventory/byo/.gitignore | 1
-rw-r--r--  inventory/byo/hosts | 43
-rw-r--r--  inventory/byo/hosts.example | 76
-rw-r--r--  inventory/gce/hosts/hosts | 2
-rw-r--r--  inventory/libvirt/hosts/hosts | 2
-rwxr-xr-x  inventory/multi_ec2.py | 2
-rw-r--r--  inventory/openstack/hosts/hosts | 2
-rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml | 142
-rw-r--r--  playbooks/adhoc/create_pv/pv-template.j2 | 16
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 51
-rw-r--r--  playbooks/adhoc/zabbix_setup/create_template.yml | 57
l---------  playbooks/adhoc/zabbix_setup/filter_plugins | 1
l---------  playbooks/adhoc/zabbix_setup/roles | 1
-rw-r--r--  playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 38
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml | 11
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_host.yml | 27
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_master.yml | 27
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_node.yml | 27
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml | 90
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_router.yml | 27
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 17
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 2
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 37
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 12
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 12
-rw-r--r--  playbooks/byo/vagrant.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 5
-rw-r--r--  playbooks/common/openshift-master/config.yml | 29
-rw-r--r--  playbooks/common/openshift-node/config.yml | 11
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 20
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 1
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 15
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 18
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 17
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 9
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 103
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 9
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 24
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 25
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
-rw-r--r--  roles/etcd/defaults/main.yaml | 2
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 4
-rw-r--r--  roles/etcd_ca/meta/main.yml | 2
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 3
-rw-r--r--  roles/openshift_common/README.md | 2
-rw-r--r--  roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json | 34
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/cakephp.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json | 32
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/dancer.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json | 32
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/django.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json | 32
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/nodejs.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json | 34
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/amq6.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json | 29
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json | 29
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json | 15
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json | 14
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 45
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 7
-rw-r--r--  roles/openshift_master/README.md | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 6
-rw-r--r--  roles/openshift_master/handlers/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 26
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 15
-rw-r--r--  roles/openshift_master/templates/scheduler.json.j2 | 1
-rw-r--r--  roles/openshift_master/templates/v1_partials/oauthConfig.j2 | 14
-rw-r--r--  roles/openshift_master_ca/meta/main.yml | 2
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 32
-rw-r--r--  roles/openshift_master_certificates/vars/main.yml | 3
-rw-r--r--  roles/openshift_master_cluster/README.md | 34
-rw-r--r--  roles/openshift_master_cluster/meta/main.yml | 16
-rw-r--r--  roles/openshift_master_cluster/tasks/configure.yml | 44
-rw-r--r--  roles/openshift_master_cluster/tasks/configure_deferred.yml | 8
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 13
-rw-r--r--  roles/openshift_node/README.md | 14
-rw-r--r--  roles/openshift_node/tasks/main.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 3
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_repos/README.md | 2
-rw-r--r--  roles/os_zabbix/library/__init__.py | 0
-rw-r--r--  roles/os_zabbix/library/test.yml | 92
-rw-r--r--  roles/os_zabbix/library/zbx_host.py | 162
-rw-r--r--  roles/os_zabbix/library/zbx_hostgroup.py | 116
-rw-r--r--  roles/os_zabbix/library/zbx_item.py | 160
-rw-r--r--  roles/os_zabbix/library/zbx_mediatype.py | 149
-rw-r--r--  roles/os_zabbix/library/zbx_template.py | 126
-rw-r--r--  roles/os_zabbix/library/zbx_trigger.py | 175
-rw-r--r--  roles/os_zabbix/library/zbx_user.py | 146
-rw-r--r--  roles/os_zabbix/library/zbx_usergroup.py | 160
-rwxr-xr-x  roles/os_zabbix/library/zbxapi.py | 382
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 5
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 29
-rw-r--r--  roles/rhel_unsubscribe/tasks/main.yml | 5
150 files changed, 3237 insertions, 885 deletions
diff --git a/README_AWS.md b/README_AWS.md
index 0e3128a92..c511741b9 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -22,6 +22,27 @@ Note: You must source this file before running any Ansible commands.
Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials, see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
+Subscribe to CentOS
+-------------------
+
+1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)
+
+
+Set up Security Group
+---------------------
+By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` for SDN.
+You may also want to allow access from the outside world on the following ports:
+
+```
+• 22 - ssh
+• 80 - Web Apps
+• 443 - Web Apps (https)
+• 4789 - SDN / VXLAN
+• 8443 - Openshift Console
+• 10250 - kubelet
+```
+
+
(Optional) Setup your $HOME/.ssh/config file
-------------------------------------------
In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config`
@@ -40,7 +61,7 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
By default, a cluster is launched with the following configuration:
-- Instance type: m3.large
+- Instance type: m4.large
- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
- Region: us-east-1
- Keypair name: libra
@@ -62,7 +83,7 @@ Node specific defaults:
If needed, these values can be changed by setting environment variables on your system.
-- export ec2_instance_type='m3.large'
+- export ec2_instance_type='m4.large'
- export ec2_image='ami-307b3658'
- export ec2_region='us-east-1'
- export ec2_keypair='libra'
@@ -130,3 +151,21 @@ The --deployment-type flag can be passed to bin/cluster to specify the deploymen
bin/cluster create aws --deployment-type=online <cluster-id>
```
Note: If no deployment type is specified, then the default is origin.
+
+
+## Post-ansible steps
+Create the default router
+-------------------------
+On the master host:
+```sh
+oadm router --create=true \
+ --credentials=/etc/openshift/master/openshift-router.kubeconfig
+```
+
+Create the default docker-registry
+----------------------------------
+On the master host:
+```sh
+oadm registry --create=true \
+ --credentials=/etc/openshift/master/openshift-registry.kubeconfig
+``` \ No newline at end of file
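For reference, a minimal Python (boto) sketch, not part of this patch, that checks whether the `public` security group opens the ports listed in the new README section above. It assumes boto credentials are configured via the environment, `~/.boto`, or `~/.aws/credentials` as described in the README; the group name and region mirror the documented defaults.

```python
#!/usr/bin/env python
# Sketch only: verify the `public` security group allows the ports the
# README above asks for. Assumes boto credentials are already configured.
import boto.ec2

REQUIRED_PORTS = {22: 'ssh', 80: 'Web Apps', 443: 'Web Apps (https)',
                  4789: 'SDN / VXLAN', 8443: 'OpenShift Console',
                  10250: 'kubelet'}

def missing_ports(region='us-east-1', group_name='public'):
    conn = boto.ec2.connect_to_region(region)
    group = conn.get_all_security_groups(groupnames=[group_name])[0]
    open_ports = set()
    for rule in group.rules:
        if rule.from_port is None:
            continue
        open_ports.update(range(int(rule.from_port), int(rule.to_port) + 1))
    return dict((port, desc) for port, desc in REQUIRED_PORTS.items()
                if port not in open_ports)

if __name__ == '__main__':
    for port, desc in sorted(missing_ports().items()):
        print "missing rule for port %d (%s)" % (port, desc)
```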
diff --git a/README_OSE.md b/README_OSE.md
index 471fdcf9e..31173e5d6 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -46,7 +46,7 @@ subscription-manager repos --disable="*"
subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
---enable="rhel-server-7-ose-beta-rpms"
+--enable="rhel-7-server-ose-3.0-rpms"
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
diff --git a/README_origin.md b/README_origin.md
index 2ccedf4d5..f13fe660a 100644
--- a/README_origin.md
+++ b/README_origin.md
@@ -35,7 +35,7 @@ subscription-manager repos --disable="*"
subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
---enable="rhel-server-7-ose-beta-rpms"
+--enable="rhel-7-server-ose-3.0-rpms"
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
@@ -99,10 +99,13 @@ oadm router --create=true \
On the master host:
```sh
oadm registry --create=true \
- --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
- --mount-host=/var/lib/openshift/docker-registry
+ --credentials=/etc/openshift/master/openshift-registry.kubeconfig
```
+If you would like persistent storage, refer to the
+[OpenShift documentation](https://docs.openshift.org/latest/admin_guide/install/docker_registry.html)
+for more information on deployment options for the built in docker-registry.
+
## Overriding detected ip addresses and hostnames
Some deployments will require that the user override the detected hostnames
and ip addresses for the hosts. To see what the default values will be you can
diff --git a/README_vagrant.md b/README_vagrant.md
index e3b3b5551..5f87d6633 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -1,10 +1,29 @@
Requirements
------------
- vagrant (tested against version 1.7.2)
-- vagrant-hostmaster plugin (tested against version 1.5.0)
+- vagrant-hostmanager plugin (tested against version 1.5.0)
+- vagrant-registration plugin (only required for enterprise deployment type)
- vagrant-libvirt (tested against version 0.0.26)
- Only required if using libvirt instead of virtualbox
+For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant:
+
+1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads)
+
+2. Install it into vagrant
+
+ ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box``
+
+3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt)
+
+ Resize the actual qcow2 image:
+
+ ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB``
+
+ Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. A corrected metadata.json looks like this:
+
+ ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}``
+
Usage
-----
```
@@ -21,5 +40,10 @@ vagrant provision
Environment Variables
---------------------
The following environment variables can be overriden:
-- OPENSHIFT_DEPLOYMENT_TYPE (defaults to origin, choices: origin, enterprise, online)
-- OPENSHIFT_NUM_NODES (the number of nodes to create, defaults to 2)
+- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
+- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
+
+For ``enterprise`` deployment types these env variables should also be specified:
+- ``rhel_subscription_user``: rhsm user
+- ``rhel_subscription_pass``: rhsm password
+- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects
diff --git a/Vagrantfile b/Vagrantfile
index a832ae84e..4675b5d60 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -15,6 +15,28 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.hostmanager.manage_host = true
config.hostmanager.include_offline = true
config.ssh.insert_key = false
+
+ if deployment_type === 'enterprise'
+ unless Vagrant.has_plugin?('vagrant-registration')
+ raise 'vagrant-registration-plugin is required for enterprise deployment'
+ end
+ username = ENV['rhel_subscription_user']
+ password = ENV['rhel_subscription_pass']
+ unless username and password
+ raise 'rhel_subscription_user and rhel_subscription_pass are required'
+ end
+ config.registration.username = username
+ config.registration.password = password
+ # FIXME this is temporary until vagrant/ansible registration modules
+ # are capable of handling specific subscription pools
+ if not ENV['rhel_subscription_pool'].nil?
+ config.vm.provision "shell" do |s|
+ s.inline = "subscription-manager attach --pool=$1 || true"
+ s.args = "#{ENV['rhel_subscription_pool']}"
+ end
+ end
+ end
+
config.vm.provider "virtualbox" do |vbox, override|
override.vm.box = "chef/centos-7.1"
vbox.memory = 1024
@@ -28,10 +50,15 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
libvirt.cpus = 2
libvirt.memory = 1024
libvirt.driver = 'kvm'
- override.vm.box = "centos-7.1"
- override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box"
- override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
- override.vm.box_download_checksum_type = "sha256"
+ case deployment_type
+ when "enterprise"
+ override.vm.box = "rhel-7"
+ when "origin"
+ override.vm.box = "centos-7.1"
+ override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box"
+ override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
+ override.vm.box_download_checksum_type = "sha256"
+ end
end
num_nodes.times do |n|
@@ -53,12 +80,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
ansible.sudo = true
ansible.groups = {
"masters" => ["master"],
- "nodes" => ["node1", "node2"],
+ "nodes" => ["master", "node1", "node2"],
}
ansible.extra_vars = {
- openshift_deployment_type: "origin",
+ deployment_type: deployment_type,
}
- ansible.playbook = "playbooks/byo/config.yml"
+ ansible.playbook = "playbooks/byo/vagrant.yml"
end
end
end
diff --git a/bin/cluster b/bin/cluster
index 746c0349a..c80fe0cab 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -23,6 +23,16 @@ class Cluster(object):
'-o ControlMaster=auto '
'-o ControlPersist=600s '
)
+ # Because of `UserKnownHostsFile=/dev/null`
+ # our `.ssh/known_hosts` file most probably misses the ssh host public keys
+ # of our servers.
+ # In that case, ansible serializes the execution of ansible modules
+ # because we might be interactively prompted to accept the ssh host public keys.
+ # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
+ # So, we don't want our modules execution to be serialized.
+ os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ # TODO: A more secure way to proceed would consist in dynamically
+ # retrieving the ssh host public keys from the IaaS interface
def get_deployment_type(self, args):
"""
@@ -51,6 +61,7 @@ class Cluster(object):
env['num_masters'] = args.masters
env['num_nodes'] = args.nodes
+ env['num_infra'] = args.infra
env['num_etcd'] = args.etcd
return self.action(args, inventory, env, playbook)
@@ -149,7 +160,7 @@ class Cluster(object):
boto_conf_files = ['~/.aws/credentials', '~/.boto']
conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
- boto_configs = [ conf for conf in boto_conf_files if conf_exists(conf)]
+ boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
if len(key_missing) > 0 and len(boto_configs) == 0:
raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(missing))
@@ -262,6 +273,8 @@ if __name__ == '__main__':
help='number of masters to create in cluster')
create_parser.add_argument('-n', '--nodes', default=2, type=int,
help='number of nodes to create in cluster')
+ create_parser.add_argument('-i', '--infra', default=1, type=int,
+ help='number of infra nodes to create in cluster')
create_parser.add_argument('-e', '--etcd', default=0, type=int,
help='number of external etcd hosts to create in cluster')
create_parser.set_defaults(func=cluster.create)
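The TODO in the bin/cluster hunk above notes that a more secure alternative to disabling host key checking would be to retrieve the ssh host public keys up front. The following sketch, not part of this patch, shows one possible way to do that with `ssh-keyscan`; the hostnames are placeholders.

```python
#!/usr/bin/env python
# Sketch of the "more secure way" the TODO mentions: scan the launched
# hosts' ssh public keys and append them to known_hosts before ansible
# connects, instead of setting ANSIBLE_HOST_KEY_CHECKING=False.
import os
import subprocess

def prime_known_hosts(hostnames, known_hosts='~/.ssh/known_hosts'):
    path = os.path.expanduser(known_hosts)
    with open(path, 'a') as fh:
        for host in hostnames:
            # -H hashes the hostnames in the output, matching ssh defaults
            fh.write(subprocess.check_output(['ssh-keyscan', '-H', host]))

if __name__ == '__main__':
    prime_known_hosts(['master.example.com', 'node1.example.com'])
```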
diff --git a/bin/openshift_ansible/aws b/bin/openshift_ansible/aws
new file mode 120000
index 000000000..eb0575b4d
--- /dev/null
+++ b/bin/openshift_ansible/aws
@@ -0,0 +1 @@
+../../inventory/aws/ \ No newline at end of file
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 1467de858..5072161f0 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,6 +1,12 @@
__ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ if python -c 'import openshift_ansible' &>/dev/null; then
+ /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
+ elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
+ elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
fi
}
@@ -19,8 +25,15 @@ _ossh()
complete -F _ossh ossh oscp
__opssh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ if python -c 'import openshift_ansible' &>/dev/null; then
+ /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
+ elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
+ elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
fi
}
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 6ab930dc4..44500c618 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -1,9 +1,16 @@
#compdef ossh oscp
_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
+ if python -c 'import openshift_ansible' &>/dev/null; then
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+ elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+ elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+ fi
}
_ossh(){
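The completion helpers above embed fairly dense python one-liners. As a readability aid (not part of this patch), here is the same logic written out: load the multi_ec2 inventory from the installed `openshift_ansible` package when available, otherwise fall back to one of the JSON cache files, and print `<Name>.<environment>` for every host carrying both tags.

```python
#!/usr/bin/env python
# Readable expansion of the one-liners used by the completion scripts above.
import json
import os

CACHE_LOCATIONS = ['/dev/shm/.ansible/tmp/multi_ec2_inventory.cache',
                   '~/.ansible/tmp/multi_ec2_inventory.cache']

def load_inventory():
    try:
        from openshift_ansible import multi_ec2
        mec2 = multi_ec2.MultiEc2()
        mec2.run()
        return mec2.result
    except ImportError:
        for loc in CACHE_LOCATIONS:
            loc = os.path.expanduser(loc)
            if os.path.isfile(loc):
                return json.loads(open(loc).read())
    return {'_meta': {'hostvars': {}}}

def known_hosts():
    hostvars = load_inventory()['_meta']['hostvars']
    return sorted('%s.%s' % (host['ec2_tag_Name'], host['ec2_tag_environment'])
                  for host in hostvars.values()
                  if all(k in host for k in ('ec2_tag_Name',
                                             'ec2_tag_environment')))

if __name__ == '__main__':
    print '\n'.join(known_hosts())
```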
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index 6aaf5228a..a146b93ad 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -147,7 +147,40 @@ Every effort should be made to keep our Ansible YAML files in pure YAML.
[cols="2v,v"]
|===
| **Rule**
-| Parameters to Ansible Modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
+| Custom Ansible modules SHOULD be embedded in a role.
+|===
+
+.Context
+* http://docs.ansible.com/ansible/playbooks_roles.html#embedding-modules-in-roles[Ansible doc on how to embed modules in roles]
+
+The purpose of this rule is to make it easy to include custom modules in our playbooks and share them on Ansible Galaxy.
+
+.Custom module `openshift_facts.py` is embedded in the `openshift_facts` role.
+----
+> ll openshift-ansible/roles/openshift_facts/library/
+-rwxrwxr-x. 1 user group 33616 Jul 22 09:36 openshift_facts.py
+----
+
+.Custom module `openshift_facts` can be used after `openshift_facts` role has been referenced.
+[source,yaml]
+----
+- hosts: openshift_hosts
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - openshift_facts
+ role: common
+ hostname: host
+ public_hostname: host.example.com
+----
+
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
|===
When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter.
@@ -174,7 +207,7 @@ When a module has several parameters that are being passed in, it's hard to see
[cols="2v,v"]
|===
| **Rule**
-| Parameters to Ansible Modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
+| Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
|===
Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter.
@@ -198,10 +231,10 @@ Lines that are long quickly become a wall of text that isn't easily parsable. It
[cols="2v,v"]
|===
| **Rule**
-| The Ansible `shell` module SHOULD NOT be used. Instead, use the `command` module.
+| The Ansible `command` module SHOULD be used instead of the Ansible `shell` module.
|===
.Context
-* http://docs.ansible.com/shell_module.html#notes[Ansible Doc on why using the command module is a best practice]
+* http://docs.ansible.com/shell_module.html#notes[Ansible doc on why using the command module is a best practice]
The Ansible `shell` module can run most commands that can be run from a bash CLI. This makes it extremely powerful, but it also opens our playbooks up to being exploited by attackers.
@@ -224,7 +257,7 @@ The Ansible `shell` module can run most commands that can be run from a bash CLI
| The Ansible `quote` filter MUST be used with any variable passed into the shell module.
|===
.Context
-* http://docs.ansible.com/shell_module.html#notes[Ansible Doc describing why to use the quote filter]
+* http://docs.ansible.com/shell_module.html#notes[Ansible doc describing why to use the quote filter]
It is recommended not to use the `shell` module. However, if it absolutely must be used, all variables passed into the `shell` module MUST use the `quote` filter to ensure they are shell safe.
diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc
index 110e7153f..09d4839c7 100644
--- a/docs/style_guide.adoc
+++ b/docs/style_guide.adoc
@@ -65,7 +65,7 @@ Example: `tasks.yml`
| Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_
|===
-Ansible allows variables to be passed in on the command line using the `-e` option. These variables MUST have a prefix of cli_ so that it's clear where they came from, and how dangerous they are (can be exploited).
+Ansible allows variables to be passed in on the command line using the `-e` option. These variables MUST have a prefix of cli_ so that it's clear where they came from, and that they could be exploited.
.Example:
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 18229631c..c3408702d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -50,7 +50,6 @@ class FilterModule(object):
return [item for sublist in data for item in sublist]
-
@staticmethod
def oo_collect(data, attribute=None, filters=None):
''' This takes a list of dict and collects all attributes specified into a
@@ -133,6 +132,16 @@ class FilterModule(object):
return rval
@staticmethod
+ def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
+ '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and
+ arrange them as a string 'key=value key=value'
+ '''
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects first param is a dict")
+
+ return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()])
+
+ @staticmethod
def oo_ami_selector(data, image_name):
''' This takes a list of amis and an image name and attempts to return
the latest ami.
@@ -233,15 +242,6 @@ class FilterModule(object):
return [x for x in data if x[filter_attr]]
@staticmethod
- def oo_build_zabbix_list_dict(values, string):
- ''' Build a list of dicts with string as key for each value
- '''
- rval = []
- for value in values:
- rval.append({string: value})
- return rval
-
- @staticmethod
def oo_parse_heat_stack_outputs(data):
''' Formats the HEAT stack output into a usable form
@@ -319,8 +319,8 @@ class FilterModule(object):
"oo_ami_selector": self.oo_ami_selector,
"oo_ec2_volume_definition": self.oo_ec2_volume_definition,
"oo_combine_key_value": self.oo_combine_key_value,
+ "oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
- "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
"oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
}
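A quick illustration, not part of this patch, of the contract the new `oo_combine_dict` filter describes in its docstring: a dict becomes a `key=value key=value` string. The body is copied from the filter; only the sample data is made up, and `TypeError` stands in for `AnsibleFilterError` so the snippet has no ansible dependency.

```python
#!/usr/bin/env python
# Standalone demo of the oo_combine_dict filter added above.
def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
    if not issubclass(type(data), dict):
        raise TypeError("expects first param is a dict")
    return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()])

print oo_combine_dict({'region': 'primary', 'zone': 'default'})
# -> region=primary zone=default   (dict ordering may vary)
```

In a playbook the same thing is expressed as a Jinja filter, e.g. `{{ my_dict | oo_combine_dict }}`.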
diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py
new file mode 100644
index 000000000..a473993a2
--- /dev/null
+++ b/filter_plugins/oo_zabbix_filters.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom zabbix filters for use in openshift-ansible
+'''
+
+import pdb
+
+class FilterModule(object):
+ ''' Custom zabbix ansible filters '''
+
+ @staticmethod
+ def create_data(data, results, key, new_key):
+ '''Take a dict, filter through results and add results['key'] to dict
+ '''
+ new_list = [app[key] for app in results]
+ data[new_key] = new_list
+ return data
+
+ @staticmethod
+ def oo_set_zbx_trigger_triggerid(item, trigger_results):
+ '''Set zabbix trigger id from trigger results
+ '''
+ if isinstance(trigger_results, list):
+ item['triggerid'] = trigger_results[0]['triggerid']
+ return item
+
+ item['triggerid'] = trigger_results['triggerids'][0]
+ return item
+
+ @staticmethod
+ def oo_set_zbx_item_hostid(item, template_results):
+ ''' Set zabbix host id from template results
+ '''
+ if isinstance(template_results, list):
+ item['hostid'] = template_results[0]['templateid']
+ return item
+
+ item['hostid'] = template_results['templateids'][0]
+ return item
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def select_by_name(ans_data, data):
+ ''' test
+ '''
+ for zabbix_item in data:
+ if ans_data['name'] == zabbix_item:
+ data[zabbix_item]['params']['hostid'] = ans_data['templateid']
+ return data[zabbix_item]['params']
+ return None
+
+ @staticmethod
+ def oo_build_zabbix_list_dict(values, string):
+ ''' Build a list of dicts with string as key for each value
+ '''
+ rval = []
+ for value in values:
+ rval.append({string: value})
+ return rval
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "select_by_name": self.select_by_name,
+ "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid,
+ "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid,
+ "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
+ "create_data": self.create_data,
+ }
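An illustration, not part of this patch, of how two of the new zabbix filters behave. The filter bodies are copied from the file above; the sample "API results" are made up to show the two result shapes `oo_set_zbx_item_hostid` accepts.

```python
#!/usr/bin/env python
# Standalone demo of two filters from oo_zabbix_filters.py.
def oo_set_zbx_item_hostid(item, template_results):
    if isinstance(template_results, list):
        item['hostid'] = template_results[0]['templateid']
        return item
    item['hostid'] = template_results['templateids'][0]
    return item

def oo_build_zabbix_list_dict(values, string):
    return [{string: value} for value in values]

# "list" shaped result (e.g. from a lookup/list call)
print oo_set_zbx_item_hostid({'key_': 'heartbeat.ping'},
                             [{'templateid': '10001'}])
# "create" shaped result (e.g. {'templateids': [...]})
print oo_set_zbx_item_hostid({'key_': 'heartbeat.ping'},
                             {'templateids': ['10001']})

print oo_build_zabbix_list_dict(['Template Heartbeat', 'Template OS Linux'],
                                'host')
# -> [{'host': 'Template Heartbeat'}, {'host': 'Template OS Linux'}]
```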
diff --git a/git/.pylintrc b/git/.pylintrc
index af8f1656f..fe6eef6de 100644
--- a/git/.pylintrc
+++ b/git/.pylintrc
@@ -71,7 +71,7 @@ confidence=
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
# w0511 - fixme - disabled because TODOs are acceptable
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801
[REPORTS]
@@ -205,7 +205,7 @@ docstring-min-length=-1
[SIMILARITIES]
# Minimum lines number of a similarity.
-min-similarity-lines=4
+min-similarity-lines=0
# Ignore comments when computing similarities.
ignore-comments=yes
@@ -214,7 +214,7 @@ ignore-comments=yes
ignore-docstrings=yes
# Ignore imports when computing similarities.
-ignore-imports=no
+ignore-imports=yes
[VARIABLES]
diff --git a/git/pylint.sh b/git/pylint.sh
index 86ea52d45..8666931e9 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -15,7 +15,9 @@ NEWREV=$2
PYTHON=/var/lib/jenkins/python27/bin/python
+set +e
PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")
+set -e
FILES_TO_TEST=""
@@ -40,5 +42,7 @@ done
if [ "${FILES_TO_TEST}" != "" ]; then
echo "Testing files: ${FILES_TO_TEST}"
- ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
+ exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
+else
+ exit 0
fi
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
index 34a4396bd..bf4e0845a 100644
--- a/inventory/aws/hosts/hosts
+++ b/inventory/aws/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/.gitignore b/inventory/byo/.gitignore
new file mode 100644
index 000000000..6ff331c7e
--- /dev/null
+++ b/inventory/byo/.gitignore
@@ -0,0 +1 @@
+hosts
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
deleted file mode 100644
index a9add6a60..000000000
--- a/inventory/byo/hosts
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# SSH user, this user should allow ssh based auth without requiring a password
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
-
-# To deploy origin, change deployment_type to origin
-deployment_type=enterprise
-
-# Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
-
-# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-
-# htpasswd auth
-#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
-
-# host group for masters
-[masters]
-ose3-master-ansible.test.example.com
-
-[etcd]
-#ose3-master-ansible.test.example.com
-
-# host group for nodes
-[nodes]
-ose3-master-ansible.test.example.com openshift_scheduleable=False
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
new file mode 100644
index 000000000..646790c42
--- /dev/null
+++ b/inventory/byo/hosts.example
@@ -0,0 +1,76 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_sudo=true
+
+# deployment type valid values are origin, online and enterprise
+deployment_type=enterprise
+
+# Pre-release registry URL
+#oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+
+# Pre-release Dev puddle repo
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Pre-release Errata puddle repo
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# master cluster ha variables using pacemaker or RHEL HA
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# master cluster ha variables when using a different HA solution
+# For installation the value of openshift_master_cluster_hostname must resolve
+# to the first master defined in the inventory.
+# The HA solution must be manually configured after installation and must ensure
+# that openshift-master is running on a single master host.
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_defer_ha=True
+
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# host group for nodes
+[nodes]
+ose3-master[1:3]-ansible.test.example.com openshift_scheduleable=False
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
index 34a4396bd..bf4e0845a 100644
--- a/inventory/gce/hosts/hosts
+++ b/inventory/gce/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
index 34a4396bd..bf4e0845a 100644
--- a/inventory/libvirt/hosts/hosts
+++ b/inventory/libvirt/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index b7ce9e5dc..2cbf33473 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -78,7 +78,7 @@ class MultiEc2(object):
},
]
- self.config['cache_max_age'] = 0
+ self.config['cache_max_age'] = 300
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
index 9cdc31449..2d2194a4d 100644
--- a/inventory/openstack/hosts/hosts
+++ b/inventory/openstack/hosts/hosts
@@ -1 +1 @@
-localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local
+localhost ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
new file mode 100644
index 000000000..684a0ca72
--- /dev/null
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -0,0 +1,142 @@
+---
+#example run:
+# ansible-playbook -e "cli_volume_size=1" \
+# -e "cli_device_name=/dev/xvdf" \
+# -e "cli_hosttype=master" \
+# -e "cli_environment=ops" \
+# create_pv.yaml
+# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
+#
+- name: Create a volume and attach it to master
+ hosts: localhost
+ gather_facts: no
+ vars:
+ cli_volume_type: gp2
+ cli_volume_iops: ''
+ oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
+ intersect(groups['tag_environment_' ~ cli_environment]) |
+ first }}"
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_volume_size
+ - cli_device_name
+ - cli_hosttype
+ - cli_environment
+
+ - name: set oo_name fact
+ set_fact:
+ oo_name: "{{ oo_name }}"
+
+
+ - name: Select a single master to run this on
+ add_host:
+ hostname: "{{ oo_name }}"
+ ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}"
+ groups: oo_master
+
+ - name: Create a volume and attach it
+ ec2_vol:
+ state: present
+ instance: "{{ hostvars[oo_name]['ec2_id'] }}"
+ region: "{{ hostvars[oo_name]['ec2_region'] }}"
+ volume_size: "{{ cli_volume_size }}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: "{{ cli_device_name }}"
+ iops: "{{ cli_volume_iops }}"
+ register: vol
+
+ - debug: var=vol
+
+- name: Configure the drive
+ gather_facts: no
+ hosts: oo_master
+ user: root
+ connection: ssh
+ vars:
+ pv_tmpdir: /tmp/persistentvolumes
+
+ post_tasks:
+ - name: Setting facts for template
+ set_fact:
+ pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}"
+ vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}"
+ vol_id: "{{ hostvars.localhost.vol.volume_id }}"
+ vol_size: "{{ cli_volume_size }}"
+ pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}"
+
+ - set_fact:
+ pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml"
+
+ - name: "Mkdir {{ pv_tmpdir }}"
+ file:
+ state: directory
+ path: "{{ pv_tmpdir }}"
+ mode: '0750'
+
+ - name: "Mkdir {{ pv_mntdir }}"
+ file:
+ state: directory
+ path: "{{ pv_mntdir }}"
+ mode: '0750'
+
+ - name: Create pv file from template
+ template:
+ src: ./pv-template.j2
+ dest: "{{ pv_template }}"
+ owner: root
+ mode: '0640'
+
+ - name: mkfs
+ filesystem:
+ dev: "{{ cli_device_name }}"
+ fstype: ext4
+
+ - name: Mount the dev
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ fstype: ext4
+ state: mounted
+
+ - name: chgrp g+rwXs
+ file:
+ path: "{{ pv_mntdir }}"
+ mode: 'g+rwXs'
+ recurse: yes
+ seuser: system_u
+ serole: object_r
+ setype: svirt_sandbox_file_t
+ selevel: s0
+
+ - name: umount
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: unmounted
+ fstype: ext4
+
+ - name: detach drive
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}"
+ id: "{{ hostvars.localhost.vol.volume_id }}"
+ instance: None
+
+ - name: "Remove {{ pv_mntdir }}"
+ file:
+ state: absent
+ path: "{{ pv_mntdir }}"
+
+ # We have to use the shell module because we can't set env vars with the command module.
+ - name: "Place PV into oc"
+ shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+ register: oc_output
+
+ - debug: var=oc_output
+
+ - fail:
+ msg: "Failed to add {{ pv_template }} to master."
+ when: oc_output.rc != 0
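The playbook above delegates the volume work to the `ec2_vol` module. For readers unfamiliar with that module, here is a rough boto equivalent, not part of this patch, of the single "Create a volume and attach it" step; the region, instance id, size, and device name are placeholders.

```python
#!/usr/bin/env python
# Rough boto equivalent of the ec2_vol create-and-attach task above.
import time
import boto.ec2

def create_and_attach(region, instance_id, size_gb, device_name,
                      volume_type='gp2'):
    conn = boto.ec2.connect_to_region(region)
    instance = conn.get_only_instances(instance_ids=[instance_id])[0]
    # the volume must live in the same availability zone as the instance
    vol = conn.create_volume(size_gb, instance.placement,
                             volume_type=volume_type)
    while vol.status != 'available':   # ec2_vol waits for this as well
        time.sleep(2)
        vol.update()
    conn.attach_volume(vol.id, instance_id, device_name)
    return vol.id

if __name__ == '__main__':
    print create_and_attach('us-east-1', 'i-0123456789abcdef0', 1, '/dev/xvdf')
```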
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
new file mode 100644
index 000000000..5654ef6c4
--- /dev/null
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ pv_name }}
+ labels:
+ type: ebs
+spec:
+ capacity:
+ storage: {{ vol_size }}Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Recycle
+ awsElasticBlockStore:
+ volumeID: aws://{{ vol_az }}/{{ vol_id }}
+ fsType: ext4
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
new file mode 100644
index 000000000..a31cbef65
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -0,0 +1,51 @@
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ g_zserver: http://localhost/zabbix/api_jsonrpc.php
+ g_zuser: Admin
+ g_zpassword: zabbix
+ roles:
+ - ../../../roles/os_zabbix
+ post_tasks:
+
+ - zbx_template:
+ server: "{{ g_zserver }}"
+ user: "{{ g_zuser }}"
+ password: "{{ g_zpassword }}"
+ state: list
+ name: 'Template Heartbeat'
+ register: templ_heartbeat
+
+ - zbx_template:
+ server: "{{ g_zserver }}"
+ user: "{{ g_zuser }}"
+ password: "{{ g_zpassword }}"
+ state: list
+ name: 'Template App Zabbix Server'
+ register: templ_zabbix_server
+
+ - zbx_template:
+ server: "{{ g_zserver }}"
+ user: "{{ g_zuser }}"
+ password: "{{ g_zpassword }}"
+ state: list
+ name: 'Template App Zabbix Agent'
+ register: templ_zabbix_agent
+
+ - zbx_template:
+ server: "{{ g_zserver }}"
+ user: "{{ g_zuser }}"
+ password: "{{ g_zpassword }}"
+ state: list
+ register: templates
+
+ - debug: var=templ_heartbeat.results
+
+ - zbx_template:
+ server: "{{ g_zserver }}"
+ user: "{{ g_zuser }}"
+ password: "{{ g_zpassword }}"
+ state: absent
+ with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
+ when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml
new file mode 100644
index 000000000..50fff53b2
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/create_template.yml
@@ -0,0 +1,57 @@
+---
+- debug: var=ctp_template
+
+- name: Create Template
+ zbx_template:
+ server: "{{ ctp_zserver }}"
+ user: "{{ ctp_zuser }}"
+ password: "{{ ctp_zpassword }}"
+ name: "{{ ctp_template.name }}"
+ register: ctp_created_template
+
+- debug: var=ctp_created_template
+
+#- name: Create Application
+# zbxapi:
+# server: "{{ ctp_zserver }}"
+# user: "{{ ctp_zuser }}"
+# password: "{{ ctp_zpassword }}"
+# zbx_class: Application
+# state: present
+# params:
+# name: "{{ ctp_template.application.name}}"
+# hostid: "{{ ctp_created_template.results[0].templateid }}"
+# search:
+# name: "{{ ctp_template.application.name}}"
+# register: ctp_created_application
+
+#- debug: var=ctp_created_application
+
+- name: Create Items
+ zbx_item:
+ server: "{{ ctp_zserver }}"
+ user: "{{ ctp_zuser }}"
+ password: "{{ ctp_zpassword }}"
+ key: "{{ item.key }}"
+ name: "{{ item.name | default(item.key, true) }}"
+ value_type: "{{ item.value_type | default('int') }}"
+ template_name: "{{ ctp_template.name }}"
+ with_items: ctp_template.zitems
+ register: ctp_created_items
+
+#- debug: var=ctp_created_items
+
+- name: Create Triggers
+ zbx_trigger:
+ server: "{{ ctp_zserver }}"
+ user: "{{ ctp_zuser }}"
+ password: "{{ ctp_zpassword }}"
+ description: "{{ item.description }}"
+ expression: "{{ item.expression }}"
+ priority: "{{ item.priority }}"
+ with_items: ctp_template.ztriggers
+ when: ctp_template.ztriggers is defined
+
+#- debug: var=ctp_created_triggers
+
+
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
new file mode 100644
index 000000000..1729194b5
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
@@ -0,0 +1,38 @@
+---
+- hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars/template_heartbeat.yml
+ - vars/template_os_linux.yml
+ vars:
+ g_zserver: http://localhost/zabbix/api_jsonrpc.php
+ g_zuser: Admin
+ g_zpassword: zabbix
+ roles:
+ - ../../../roles/os_zabbix
+ post_tasks:
+ - zbx_template:
+ server: "{{ g_zserver }}"
+ user: "{{ g_zuser }}"
+ password: "{{ g_zpassword }}"
+ state: list
+ register: templates
+
+ - debug: var=templates
+
+ - name: Include Template
+ include: create_template.yml
+ vars:
+ ctp_template: "{{ g_template_heartbeat }}"
+ ctp_zserver: "{{ g_zserver }}"
+ ctp_zuser: "{{ g_zuser }}"
+ ctp_zpassword: "{{ g_zpassword }}"
+
+ - name: Include Template
+ include: create_template.yml
+ vars:
+ ctp_template: "{{ g_template_os_linux }}"
+ ctp_zserver: "{{ g_zserver }}"
+ ctp_zuser: "{{ g_zuser }}"
+ ctp_zpassword: "{{ g_zpassword }}"
+
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
new file mode 100644
index 000000000..22cc75554
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
@@ -0,0 +1,11 @@
+---
+g_template_heartbeat:
+ name: Template Heartbeat
+ zitems:
+ - name: Heartbeat Ping
+ hostid:
+ key: heartbeat.ping
+ ztriggers:
+ - description: 'Heartbeat.ping has failed on {HOST.NAME}'
+ expression: '{Template Heartbeat:heartbeat.ping.last()}<>0'
+ priority: avg
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml
new file mode 100644
index 000000000..e7cc667cb
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/vars/template_host.yml
@@ -0,0 +1,27 @@
+---
+g_template_host:
+ params:
+ name: Template Host
+ host: Template Host
+ groups:
+ - groupid: 1 # FIXME (not real)
+ output: extend
+ search:
+ name: Template Host
+ zitems:
+ - name: Host Ping
+ hostid:
+ key_: host.ping
+ type: 2
+ value_type: 0
+ output: extend
+ search:
+ key_: host.ping
+ ztriggers:
+ - description: 'Host ping has failed on {HOST.NAME}'
+ expression: '{Template Host:host.ping.last()}<>0'
+ priority: 3
+ searchWildcardsEnabled: True
+ search:
+ description: 'Host ping has failed on*'
+ expandExpression: True
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml
new file mode 100644
index 000000000..5f9b41a4f
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/vars/template_master.yml
@@ -0,0 +1,27 @@
+---
+g_template_master:
+ params:
+ name: Template Master
+ host: Template Master
+ groups:
+ - groupid: 1 # FIXME (not real)
+ output: extend
+ search:
+ name: Template Master
+ zitems:
+ - name: Master Etcd Ping
+ hostid:
+ key_: master.etcd.ping
+ type: 2
+ value_type: 0
+ output: extend
+ search:
+ key_: master.etcd.ping
+ ztriggers:
+ - description: 'Master Etcd ping has failed on {HOST.NAME}'
+ expression: '{Template Master:master.etcd.ping.last()}<>0'
+ priority: 3
+ searchWildcardsEnabled: True
+ search:
+ description: 'Master Etcd ping has failed on*'
+ expandExpression: True
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml
new file mode 100644
index 000000000..98c343a24
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/vars/template_node.yml
@@ -0,0 +1,27 @@
+---
+g_template_node:
+ params:
+ name: Template Node
+ host: Template Node
+ groups:
+ - groupid: 1 # FIXME (not real)
+ output: extend
+ search:
+ name: Template Node
+ zitems:
+ - name: Kubelet Ping
+ hostid:
+ key_: kubelet.ping
+ type: 2
+ value_type: 0
+ output: extend
+ search:
+ key_: kubelet.ping
+ ztriggers:
+ - description: 'Kubelet ping has failed on {HOST.NAME}'
+ expression: '{Template Node:kubelet.ping.last()}<>0'
+ priority: 3
+ searchWildcardsEnabled: True
+ search:
+ description: 'Kubelet ping has failed on*'
+ expandExpression: True
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
new file mode 100644
index 000000000..9cc038ffa
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
@@ -0,0 +1,90 @@
+---
+g_template_os_linux:
+ name: Template OS Linux
+ zitems:
+ - key: kernel.uname.sysname
+ value_type: string
+
+ - key: kernel.all.cpu.wait.total
+ value_type: int
+
+ - key: kernel.all.cpu.irq.hard
+ value_type: int
+
+ - key: kernel.all.cpu.idle
+ value_type: int
+
+ - key: kernel.uname.distro
+ value_type: string
+
+ - key: kernel.uname.nodename
+ value_type: string
+
+ - key: kernel.all.cpu.irq.soft
+ value_type: int
+
+ - key: kernel.all.load.15_minute
+ value_type: float
+
+ - key: kernel.all.cpu.sys
+ value_type: int
+
+ - key: kernel.all.load.5_minute
+ value_type: float
+
+ - key: mem.freemem
+ value_type: int
+
+ - key: kernel.all.cpu.nice
+ value_type: int
+
+ - key: mem.util.bufmem
+ value_type: int
+
+ - key: swap.used
+ value_type: int
+
+ - key: kernel.all.load.1_minute
+ value_type: float
+
+ - key: kernel.uname.version
+ value_type: string
+
+ - key: swap.length
+ value_type: int
+
+ - key: mem.physmem
+ value_type: int
+
+ - key: kernel.all.uptime
+ value_type: int
+
+ - key: swap.free
+ value_type: int
+
+ - key: mem.util.used
+ value_type: int
+
+ - key: kernel.all.cpu.user
+ value_type: int
+
+ - key: kernel.uname.machine
+ value_type: string
+
+ - key: hinv.ncpu
+ value_type: int
+
+ - key: mem.util.cached
+ value_type: int
+
+ - key: kernel.all.cpu.steal
+ value_type: int
+
+ - key: kernel.all.pswitch
+ value_type: int
+
+ - key: kernel.uname.release
+ value_type: string
+
+ - key: proc.nprocs
+ value_type: int
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml
new file mode 100644
index 000000000..4dae7da1e
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/vars/template_router.yml
@@ -0,0 +1,27 @@
+---
+g_template_router:
+ params:
+ name: Template Router
+ host: Template Router
+ groups:
+ - groupid: 1 # FIXME (not real)
+ output: extend
+ search:
+ name: Template Router
+ zitems:
+ - name: Router Backends down
+ hostid:
+ key_: router.backends.down
+ type: 2
+ value_type: 0
+ output: extend
+ search:
+ key_: router.backends.down
+ ztriggers:
+ - description: 'Number of router backends down on {HOST.NAME}'
+ expression: '{Template Router:router.backends.down.last()}<>0'
+ priority: 3
+ searchWildcardsEnabled: True
+ search:
+ description: 'Number of router backends down on {HOST.NAME}'
+ expandExpression: True
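The six g_template_* variable files added above follow the same basic shape: a template name, a list of zitems keyed by the Zabbix item key, and (for most) a list of ztriggers whose expressions use the standard Zabbix form {Template Name:item.key.last()}<>0, i.e. alert when the most recent value of the item is non-zero. A minimal sketch of that shape with a hypothetical item and trigger (names are illustrative only, not part of this change):

    g_template_example:
      name: Template Example                  # Zabbix template name
      zitems:
      - name: Example Ping                    # item created on the template
        key: example.ping                     # item key referenced by the trigger
      ztriggers:
      - description: 'Example ping has failed on {HOST.NAME}'
        expression: '{Template Example:example.ping.last()}<>0'  # non-zero last value fires the trigger
        priority: avg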
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 6ee539c7e..8106d5da9 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -15,6 +15,7 @@
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 5db87fa90..a89275597 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -17,6 +17,7 @@
instances: "{{ etcd_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
- include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
- include: tasks/launch_instances.yml
@@ -24,13 +25,29 @@
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
- include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- add_host:
name: "{{ master_names.0 }}"
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
index 29e594a65..2b1db62d8 100644
--- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -158,7 +158,7 @@ EXAMPLES = '''
# Launch an EC2 instance
- ec2:
image: "{{ ami_search.results[0].ami_id }}"
- instance_type: m3.medium
+ instance_type: m4.medium
key_name: mykey
wait: yes
'''
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 25a87aaf6..236d84e74 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -5,6 +5,7 @@
env: "{{ cluster }}"
env_host_type: "{{ cluster }}-openshift-{{ type }}"
host_type: "{{ type }}"
+ sub_host_type: "{{ g_sub_host_type }}"
- set_fact:
ec2_region: "{{ lookup('env', 'ec2_region')
@@ -34,6 +35,35 @@
ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
| default(deployment_vars[deployment_type].assign_public_ip, true) }}"
when: ec2_assign_public_ip is not defined
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_master_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: host_type == "master" and sub_host_type == "default"
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_etcd_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true)}}"
+ when: host_type == "etcd" and sub_host_type == "default"
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_infra_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: host_type == "node" and sub_host_type == "infra"
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_node_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: host_type == "node" and sub_host_type == "compute"
+
+- set_fact:
+ ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+ | default(deployment_vars[deployment_type].type, true) }}"
+ when: ec2_instance_type is not defined
- set_fact:
ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
| default(deployment_vars[deployment_type].security_groups, true) }}"
@@ -69,7 +99,7 @@
iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
node:
root:
- volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+ volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
docker:
@@ -99,6 +129,7 @@
env: "{{ env }}"
host-type: "{{ host_type }}"
env-host-type: "{{ env_host_type }}"
+ sub-host-type: "{{ sub_host_type }}"
volumes: "{{ volumes }}"
register: ec2
@@ -112,7 +143,9 @@
Name: "{{ item.0 }}"
- set_fact:
- instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+ instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }},
+ tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
+ tag_sub-host-type_{{ sub_host_type }}"
- name: Add new instances groups and variables
add_host:
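The set_fact chain added above selects the instance type and security groups per host role: a role-specific variable such as ec2_master_instance_type or ec2_infra_instance_type wins when defined, otherwise deployment_vars[deployment_type] supplies the default, and the pre-existing ec2_instance_type environment lookup only fires for host_type/sub_host_type combinations that none of the new tasks matched. A hypothetical vars-file override using the variable names introduced above (values are illustrative):

    # Per-role sizing and security groups; anything omitted falls back to
    # deployment_vars[deployment_type] exactly as in the tasks above.
    ec2_master_instance_type: t2.medium
    ec2_infra_instance_type: c4.xlarge
    ec2_infra_security_groups: [ 'example', 'example-infra' ]
    ec2_etcd_instance_type: m4.large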
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 9c3703aba..77287cad0 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -13,6 +13,15 @@
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
- name: Terminate instances
hosts: localhost
connection: local
@@ -28,6 +37,7 @@
env: "{{ item['ec2_tag_env'] }}"
host-type: "{{ item['ec2_tag_host-type'] }}"
env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+ sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
with_items: host_vars
when: "'oo_hosts_to_terminate' in groups"
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
index e406a7635..bb18e13b0 100644
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -3,7 +3,13 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.small
+ec2_master_security_groups: [ 'integration', 'integration-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'integration', 'integration-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
index e406a7635..bbef9cc56 100644
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -3,7 +3,13 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.small
+ec2_master_security_groups: [ 'production', 'production-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'production', 'production-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'production', 'production-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
index e406a7635..9008a55ba 100644
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -3,7 +3,13 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.small
+ec2_master_security_groups: [ 'stage', 'stage-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'stage', 'stage-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 07e453f89..95bc4b3e2 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,14 +1,14 @@
---
deployment_vars:
origin:
- # fedora, since centos requires marketplace
- image: ami-acd999c4
+ # centos-7, requires marketplace
+ image: ami-96a818fe
image_name:
region: us-east-1
- ssh_user: fedora
+ ssh_user: centos
sudo: yes
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
@@ -20,7 +20,7 @@ deployment_vars:
ssh_user: root
sudo: no
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
@@ -32,7 +32,7 @@ deployment_vars:
ssh_user: ec2-user
sudo: yes
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
new file mode 100644
index 000000000..f564905ea
--- /dev/null
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -0,0 +1,12 @@
+---
+- hosts: all
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - role: rhel_subscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+ - openshift_repos
+ - os_update_latest
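This new playbook (like the rhel_unsubscribe plays added to the terminate playbooks below) gates the subscription role on three conditions: an enterprise deployment_type, a RHEL host, and a skip flag that is not set. The flag is resolved as lookup('oo_option', 'rhel_skip_subscription'), falling back to rhsub_skip and finally to 'no', and the role only runs when the result lowercases to 'no' or 'false'. Assuming the oo_option lookup returns nothing, a hypothetical extra-vars sketch to bypass subscription would be:

    deployment_type: enterprise
    rhsub_skip: 'yes'   # any value other than 'no'/'false' skips rhel_subscribe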
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
new file mode 100644
index 000000000..76246e7b0
--- /dev/null
+++ b/playbooks/byo/vagrant.yml
@@ -0,0 +1,4 @@
+---
+- include: rhel_subscribe.yml
+
+- include: config.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 0779cfe47..4c74f96db 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -39,6 +39,15 @@
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_nodes_group] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+ when: g_nodeonmaster is defined and g_nodeonmaster == true
+
- name: Evaluate oo_first_etcd
add_host:
name: "{{ groups[g_etcd_group][0] }}"
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
index 96e1a9a63..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
@@ -1,11 +1,13 @@
---
-- set_fact: k8s_type="node"
+- set_fact: k8s_type=node
+- set_fact: sub_host_type="{{ type }}"
+- set_fact: number_nodes="{{ count }}"
- name: Generate node instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
- with_sequence: count={{ num_nodes }}
+ with_sequence: count={{ number_nodes }}
- set_fact:
node_names: "{{ node_names_output.results | default([])
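With this change the include is parameterized by type and count, so the same task file now produces both compute and infra node names, and sub_host_type becomes part of each generated name ({{ cluster_id }}-node-<sub_host_type>-<random hex suffix>). The callers added in the launch playbooks invoke it once per sub-type, roughly:

    - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
      vars:
        type: "infra"              # stored as sub_host_type and embedded in the name
        count: "{{ num_infra }}"   # how many names to generate for this sub-type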
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index e92c6f1ee..190e2d862 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -3,5 +3,10 @@
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
+ - role: rhel_subscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
- openshift_repos
- os_update_latest
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 3956128e1..904ad2dab 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -27,6 +27,9 @@
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+ cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+ cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
@@ -152,16 +155,26 @@
roles:
- openshift_master_certificates
post_tasks:
+ - name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: absent
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ with_nested:
+ - masters_needing_certs
+ - - master.etcd-client.crt
+ - master.etcd-client.key
+
- name: Create a tarball of the master certs
command: >
- tar -czvf {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz
- -C {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }} .
+ tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+ -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
args:
- creates: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz"
+ creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
with_items: masters_needing_certs
- name: Retrieve the master cert tarball from the master
fetch:
- src: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz"
+ src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
dest: "{{ sync_tmpdir }}/"
flat: yes
fail_on_missing: yes
@@ -172,6 +185,7 @@
hosts: oo_masters_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -192,9 +206,14 @@
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-- name: Deploy OpenShift examples
+- name: Additional master configuration
hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool
- openshift_examples
# Additional instance config for online deployments
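The master play now derives openshift_master_ha from the size of oo_masters_to_config, and on the first master applies the openshift_master_cluster role with omc_cluster_hosts set to the full master list. The new cluster_hostname / cluster_public_hostname / cluster_defer_ha facts are fed from openshift_master_cluster_* inventory variables. A hypothetical group_vars sketch for a multi-master install (hostnames are placeholders):

    openshift_master_cluster_hostname: internal-master.example.com
    openshift_master_cluster_public_hostname: master.example.com
    # openshift_master_ha itself is computed, not set by hand:
    #   groups.oo_masters_to_config | length > 1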
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index bd35008b8..705f7f223 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -129,16 +129,15 @@
openshift_nodes: "{{ hostvars
| oo_select_keys(groups['oo_nodes_to_config'])
| oo_collect('openshift.common.hostname') }}"
- openshift_unscheduleable_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']
- | default([]))
- | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
+ openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
+ openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
- set_fact:
openshift_scheduleable_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']
- | default([]))
+ | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| difference(openshift_unscheduleable_nodes) }}"
+
roles:
- openshift_manage_node
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 35737f03d..7a3b80da0 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -15,17 +15,33 @@
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
- include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
- include: tasks/launch_instances.yml
vars:
instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ infra_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- set_fact:
- a_master: "{{ master_names[0] }}"
- - add_host: name={{ a_master }} groups=service_master
+ a_infra: "{{ infra_names[0] }}"
+ - add_host: name={{ a_infra }} groups=service_master
- include: update.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 9a9848f05..6307ecc27 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -14,6 +14,7 @@
- created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
- env-{{ cluster }}
- host-type-{{ type }}
+ - sub-host-type-{{ sub_host_type }}
- env-host-type-{{ cluster }}-openshift-{{ type }}
register: gce
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index abe6a4c95..098b0df73 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -8,7 +8,7 @@
- set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
- add_host:
name: "{{ item }}"
- groups: oo_nodes_to_terminate
+ groups: oo_hosts_to_terminate, oo_nodes_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
@@ -16,11 +16,22 @@
- set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
- add_host:
name: "{{ item }}"
- groups: oo_masters_to_terminate
+ groups: oo_hosts_to_terminate, oo_masters_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ vars_files:
+ - vars.yml
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
- include: ../openshift-node/terminate.yml
vars:
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index a7ddc1e7e..830f9d216 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -13,7 +13,7 @@
image_name: "{{ deployment_vars[deployment_type].image.name }}"
tasks:
- fail: msg="Deployment type not supported for libvirt provider yet"
- when: deployment_type in ['online', 'enterprise']
+ when: deployment_type == 'online'
- include: tasks/configure_libvirt.yml
@@ -23,13 +23,29 @@
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
- include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- include: update.yml
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index b173a09dd..8f00812a9 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -15,6 +15,23 @@
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[cluster_group] | default([])
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ vars_files:
+ - vars.yml
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
+- name: Terminate instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
- name: Destroy VMs
virt:
name: '{{ item[0] }}'
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index e3c8cd8d0..c77a0797e 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -24,9 +24,12 @@ deployment_vars:
sudo: no
enterprise:
image:
- url:
- name:
- sha256:
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
+ sha256: "{{ lookup('oo_option', 'image_sha256') |
+ default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}"
ssh_user: openshift
sudo: yes
# origin:
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index a15ec749c..d53884e0d 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -16,8 +16,13 @@ parameters:
num_nodes:
type: number
- label: Number of nodes
- description: Number of nodes
+ label: Number of compute nodes
+ description: Number of compute nodes
+
+ num_infra:
+ type: number
+ label: Number of infrastructure nodes
+ description: Number of infrastructure nodes
cidr:
type: string
@@ -55,7 +60,12 @@ parameters:
node_image:
type: string
label: Node image
- description: Name of the image for the node servers
+ description: Name of the image for the compute node servers
+
+ infra_image:
+ type: string
+ label: Infra image
+ description: Name of the image for the infra node servers
master_flavor:
type: string
@@ -65,7 +75,12 @@ parameters:
node_flavor:
type: string
label: Node flavor
- description: Flavor of the node servers
+ description: Flavor of the compute node servers
+
+ infra_flavor:
+ type: string
+ label: Infra flavor
+ description: Flavor of the infra node servers
outputs:
@@ -83,15 +98,27 @@ outputs:
node_names:
description: Name of the nodes
- value: { get_attr: [ nodes, name ] }
+ value: { get_attr: [ compute_nodes, name ] }
node_ips:
description: IPs of the nodes
- value: { get_attr: [ nodes, private_ip ] }
+ value: { get_attr: [ compute_nodes, private_ip ] }
node_floating_ips:
description: Floating IPs of the nodes
- value: { get_attr: [ nodes, floating_ip ] }
+ value: { get_attr: [ compute_nodes, floating_ip ] }
+
+ infra_names:
+ description: Name of the nodes
+ value: { get_attr: [ infra_nodes, name ] }
+
+ infra_ips:
+ description: IPs of the nodes
+ value: { get_attr: [ infra_nodes, private_ip ] }
+
+ infra_floating_ips:
+ description: Floating IPs of the nodes
+ value: { get_attr: [ infra_nodes, floating_ip ] }
resources:
@@ -218,6 +245,29 @@ resources:
remote_mode: remote_group_id
remote_group_id: { get_resource: master-secgrp }
+ infra-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-infra-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift infrastructure cluster nodes
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 80
+ port_range_max: 80
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 443
+ port_range_max: 443
+
masters:
type: OS::Heat::ResourceGroup
properties:
@@ -248,7 +298,7 @@ resources:
cluster_id: { get_param: cluster_id }
depends_on: interface
- nodes:
+ compute_nodes:
type: OS::Heat::ResourceGroup
properties:
count: { get_param: num_nodes }
@@ -257,12 +307,14 @@ resources:
properties:
name:
str_replace:
- template: cluster_id-k8s_type-%index%
+ template: cluster_id-k8s_type-sub_host_type-%index%
params:
cluster_id: { get_param: cluster_id }
k8s_type: node
+ sub_host_type: compute
cluster_id: { get_param: cluster_id }
type: node
+ subtype: compute
image: { get_param: node_image }
flavor: { get_param: node_flavor }
key_name: { get_resource: keypair }
@@ -277,3 +329,36 @@ resources:
params:
cluster_id: { get_param: cluster_id }
depends_on: interface
+
+ infra_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_infra }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-sub_host_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: node
+ sub_host_type: infra
+ cluster_id: { get_param: cluster_id }
+ type: node
+ subtype: infra
+ image: { get_param: infra_image }
+ flavor: { get_param: infra_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ - { get_resource: infra-secgrp }
+ floating_network: { get_param: external_net }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on: interface
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index 55f64211a..9dcab3e60 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -19,6 +19,12 @@ parameters:
label: Type
description: Type master or node
+ subtype:
+ type: string
+ label: Sub-type
+ description: Sub-type compute or infra for nodes, default otherwise
+ default: default
+
key_name:
type: string
label: Key name
@@ -102,11 +108,12 @@ resources:
env: { get_param: cluster_id }
host-type: { get_param: type }
env-host-type:
- str_template:
+ str_replace:
template: cluster_id-openshift-type
params:
cluster_id: { get_param: cluster_id }
type: { get_param: type }
+ sub-host-type: { get_param: subtype }
port:
type: OS::Neutron::Port
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 3cdd2ae4d..d36bdbf26 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -27,10 +27,13 @@
-P ssh_incoming={{ openstack_ssh_access_from }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
+ -P num_infra={{ num_infra }}
-P master_image={{ deployment_vars[deployment_type].image }}
-P node_image={{ deployment_vars[deployment_type].image }}
+ -P infra_image={{ deployment_vars[deployment_type].image }}
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
+ -P infra_flavor={{ openstack_flavor["infra"] }}
-P ssh_public_key="{{ openstack_ssh_public_key }}"
openshift-ansible-{{ cluster_id }}-stack'
when: stack_show_result.rc == 1
@@ -43,10 +46,13 @@
-P ssh_incoming={{ openstack_ssh_access_from }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
+ -P num_infra={{ num_infra }}
-P master_image={{ deployment_vars[deployment_type].image }}
-P node_image={{ deployment_vars[deployment_type].image }}
+ -P infra_image={{ deployment_vars[deployment_type].image }}
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
+ -P infra_flavor={{ openstack_flavor["infra"] }}
-P ssh_public_key="{{ openstack_ssh_public_key }}"
openshift-ansible-{{ cluster_id }}-stack'
when: stack_show_result.rc == 0
@@ -72,7 +78,7 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master'
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default'
with_together:
- parsed_outputs.master_names
- parsed_outputs.master_ips
@@ -84,12 +90,24 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node'
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute'
with_together:
- parsed_outputs.node_names
- parsed_outputs.node_ips
- parsed_outputs.node_floating_ips
+ - name: Add new infra instances groups and variables
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[2] }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra'
+ with_together:
+ - parsed_outputs.infra_names
+ - parsed_outputs.infra_ips
+ - parsed_outputs.infra_floating_ips
+
- name: Wait for ssh
wait_for:
host: '{{ item }}'
@@ -97,6 +115,7 @@
with_flattened:
- parsed_outputs.master_floating_ips
- parsed_outputs.node_floating_ips
+ - parsed_outputs.infra_floating_ips
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -107,6 +126,7 @@
with_flattened:
- parsed_outputs.master_floating_ips
- parsed_outputs.node_floating_ips
+ - parsed_outputs.infra_floating_ips
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index fc4ec3c88..62df2be73 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -5,6 +5,31 @@
vars_files:
- vars.yml
tasks:
+ - set_fact: cluster_group=tag_env_{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[cluster_group] | default([])
+
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ vars_files:
+ - vars.yml
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
- name: Delete the OpenStack Stack
command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
register: stack_delete_result
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index d077a6ced..43e25f2e6 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -19,6 +19,7 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
+ infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
deployment_vars:
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 0fb45f37c..0f216b84e 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,5 +1,5 @@
---
-etcd_interface: eth0
+etcd_interface: "{{ ansible_default_ipv4.interface }}"
etcd_client_port: 2379
etcd_peer_port: 2380
etcd_peers_group: etcd
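etcd_interface now defaults to the host's default IPv4 interface rather than a hard-coded eth0, which matters on hosts whose primary NIC has a different name (e.g. ens3). It remains an ordinary role default, so it can still be pinned per host or group; a minimal sketch, assuming host_vars are in use:

    # host_vars/etcd1.example.com.yml (hypothetical path and hostname)
    etcd_interface: eth1   # overrides the ansible_default_ipv4.interface default above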
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 82b1a62b8..92d44ef4d 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -17,3 +17,4 @@ galaxy_info:
- system
dependencies:
- { role: os_firewall }
+- { role: openshift_repos }
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 79a91dfde..27bfb7de9 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd
- yum: pkg=etcd state=present
+ yum: pkg=etcd-2.* state=present
- name: Validate permissions on the config dir
file:
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 801be2c97..9ac23b1dd 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -16,8 +16,8 @@ ETCD_NAME=default
{% endif %}
ETCD_DATA_DIR={{ etcd_data_dir }}
#ETCD_SNAPSHOT_COUNTER="10000"
-#ETCD_HEARTBEAT_INTERVAL="100"
-#ETCD_ELECTION_TIMEOUT="1000"
+ETCD_HEARTBEAT_INTERVAL="500"
+ETCD_ELECTION_TIMEOUT="2500"
ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
diff --git a/roles/etcd_ca/meta/main.yml b/roles/etcd_ca/meta/main.yml
index ce909b992..fb9280c9e 100644
--- a/roles/etcd_ca/meta/main.yml
+++ b/roles/etcd_ca/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index d828db52a..d592dc306 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -39,6 +39,9 @@
owner: 'td-agent'
mode: 0444
+- name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes."
+ pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes|int * 5)) }}
+
- name: ensure td-agent is running
service:
name: 'td-agent'
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index 14c2037e4..eb4ef26e8 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -7,7 +7,7 @@ Requirements
------------
A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos.
+rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
diff --git a/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json
index ff19a4834..c05e81e56 100644
--- a/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json
@@ -47,7 +47,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteMany"
+ "ReadWriteOnce"
],
"resources": {
"requests": {
@@ -148,7 +148,7 @@
{
"name": "${DATABASE_SERVICE_NAME}-data",
"persistentVolumeClaim": {
- "claimName": "mongodb"
+ "claimName": "${DATABASE_SERVICE_NAME}"
}
}
],
diff --git a/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json
index 90225e9c3..33e1cebb1 100644
--- a/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json
@@ -47,7 +47,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteMany"
+ "ReadWriteOnce"
],
"resources": {
"requests": {
@@ -144,7 +144,7 @@
{
"name": "${DATABASE_SERVICE_NAME}-data",
"persistentVolumeClaim": {
- "claimName": "mysql"
+ "claimName": "${DATABASE_SERVICE_NAME}"
}
}
],
diff --git a/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json
index 6922baa12..faf626475 100644
--- a/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json
@@ -143,7 +143,7 @@
{
"name": "DATABASE_SERVICE_NAME",
"description": "Database service name",
- "value": "mysql"
+ "value": "postgresql"
},
{
"name": "POSTGRESQL_USER",
diff --git a/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json
index 43162d8bb..398e288bf 100644
--- a/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json
@@ -47,7 +47,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteMany"
+ "ReadWriteOnce"
],
"resources": {
"requests": {
@@ -144,7 +144,7 @@
{
"name": "${DATABASE_SERVICE_NAME}-data",
"persistentVolumeClaim": {
- "claimName": "postgresql"
+ "claimName": "${DATABASE_SERVICE_NAME}"
}
}
],
@@ -160,7 +160,7 @@
{
"name": "DATABASE_SERVICE_NAME",
"description": "Database service name",
- "value": "mysql"
+ "value": "postgresql"
},
{
"name": "POSTGRESQL_USER",
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
index 712a43a11..459b841c1 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
@@ -251,6 +251,29 @@
}
]
}
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1beta3",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/jenkins-16-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "1.6",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
index e5699bce7..b722a05ae 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-frontend",
+ "name": "cakephp-mysql-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "cakephp-frontend"
+ "name": "cakephp-mysql-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-route"
+ "name": "cakephp-mysql-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "cakephp-frontend"
+ "name": "cakephp-mysql-example"
}
}
},
@@ -53,7 +53,7 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-example",
+ "name": "cakephp-mysql-example",
"annotations": {
"description": "Keeps track of changes in the application image"
}
@@ -63,7 +63,7 @@
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-example",
+ "name": "cakephp-mysql-example",
"annotations": {
"description": "Defines how to build the application"
}
@@ -90,7 +90,7 @@
"output": {
"to": {
"kind": "ImageStreamTag",
- "name": "cakephp-example:latest"
+ "name": "cakephp-mysql-example:latest"
}
},
"triggers": [
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-frontend",
+ "name": "cakephp-mysql-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -125,7 +125,7 @@
"command": [
"./migrate-database.sh"
],
- "containerName": "cakephp-example"
+ "containerName": "cakephp-mysql-example"
}
}
}
@@ -136,11 +136,11 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "cakephp-example"
+ "cakephp-mysql-example"
],
"from": {
"kind": "ImageStreamTag",
- "name": "cakephp-example:latest"
+ "name": "cakephp-mysql-example:latest"
}
}
},
@@ -150,20 +150,20 @@
],
"replicas": 1,
"selector": {
- "name": "cakephp-frontend"
+ "name": "cakephp-mysql-example"
},
"template": {
"metadata": {
- "name": "cakephp-frontend",
+ "name": "cakephp-mysql-example",
"labels": {
- "name": "cakephp-frontend"
+ "name": "cakephp-mysql-example"
}
},
"spec": {
"containers": [
{
- "name": "cakephp-example",
- "image": "cakephp-example",
+ "name": "cakephp-mysql-example",
+ "image": "cakephp-mysql-example",
"ports": [
{
"containerPort": 8080
@@ -308,7 +308,7 @@
{
"name": "APPLICATION_DOMAIN",
"description": "The exposed hostname that will route to the CakePHP service",
- "value": "cakephp-example.openshiftapps.com"
+ "value": "cakephp-mysql-example.openshiftapps.com"
},
{
"name": "GITHUB_WEBHOOK_SECRET",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
index 09521add4..532b9bd2b 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-frontend",
+ "name": "cakephp-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "cakephp-frontend"
+ "name": "cakephp-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-route"
+ "name": "cakephp-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "cakephp-frontend"
+ "name": "cakephp-example"
}
}
},
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "cakephp-frontend",
+ "name": "cakephp-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -139,13 +139,13 @@
],
"replicas": 1,
"selector": {
- "name": "cakephp-frontend"
+ "name": "cakephp-example"
},
"template": {
"metadata": {
- "name": "cakephp-frontend",
+ "name": "cakephp-example",
"labels": {
- "name": "cakephp-frontend"
+ "name": "cakephp-example"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
index fc92a1d6c..b46abfbb5 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-frontend",
+ "name": "dancer-mysql-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "dancer-frontend"
+ "name": "dancer-mysql-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-route"
+ "name": "dancer-mysql-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "dancer-frontend"
+ "name": "dancer-mysql-example"
}
}
},
@@ -53,7 +53,7 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-example",
+ "name": "dancer-mysql-example",
"annotations": {
"description": "Keeps track of changes in the application image"
}
@@ -63,7 +63,7 @@
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-example",
+ "name": "dancer-mysql-example",
"annotations": {
"description": "Defines how to build the application"
}
@@ -90,7 +90,7 @@
"output": {
"to": {
"kind": "ImageStreamTag",
- "name": "dancer-example:latest"
+ "name": "dancer-mysql-example:latest"
}
},
"triggers": [
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-frontend",
+ "name": "dancer-mysql-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -122,11 +122,11 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "dancer-example"
+ "dancer-mysql-example"
],
"from": {
"kind": "ImageStreamTag",
- "name": "dancer-example:latest"
+ "name": "dancer-mysql-example:latest"
}
}
},
@@ -136,20 +136,20 @@
],
"replicas": 1,
"selector": {
- "name": "dancer-frontend"
+ "name": "dancer-mysql-example"
},
"template": {
"metadata": {
- "name": "dancer-frontend",
+ "name": "dancer-mysql-example",
"labels": {
- "name": "dancer-frontend"
+ "name": "dancer-mysql-example"
}
},
"spec": {
"containers": [
{
- "name": "dancer-example",
- "image": "dancer-example",
+ "name": "dancer-mysql-example",
+ "image": "dancer-mysql-example",
"ports": [
{
"containerPort": 8080
@@ -282,7 +282,7 @@
{
"name": "APPLICATION_DOMAIN",
"description": "The exposed hostname that will route to the Dancer service",
- "value": "dancer-example.openshiftapps.com"
+ "value": "dancer-mysql-example.openshiftapps.com"
},
{
"name": "GITHUB_WEBHOOK_SECRET",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
index 829f50bae..6f49a7d64 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-frontend",
+ "name": "dancer-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "dancer-frontend"
+ "name": "dancer-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-route"
+ "name": "dancer-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "dancer-frontend"
+ "name": "dancer-example"
}
}
},
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "dancer-frontend",
+ "name": "dancer-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -139,13 +139,13 @@
],
"replicas": 1,
"selector": {
- "name": "dancer-frontend"
+ "name": "dancer-example"
},
"template": {
"metadata": {
- "name": "dancer-frontend",
+ "name": "dancer-example",
"labels": {
- "name": "dancer-frontend"
+ "name": "dancer-example"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json
index c46476e8a..64522ce76 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "django-frontend",
+ "name": "django-postgresql-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "django-frontend"
+ "name": "django-postgresql-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "django-route"
+ "name": "django-postgresql-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "django-frontend"
+ "name": "django-postgresql-example"
}
}
},
@@ -53,7 +53,7 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "django-example",
+ "name": "django-postgresql-example",
"annotations": {
"description": "Keeps track of changes in the application image"
}
@@ -63,7 +63,7 @@
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
- "name": "django-example",
+ "name": "django-postgresql-example",
"annotations": {
"description": "Defines how to build the application"
}
@@ -90,7 +90,7 @@
"output": {
"to": {
"kind": "ImageStreamTag",
- "name": "django-example:latest"
+ "name": "django-postgresql-example:latest"
}
},
"triggers": [
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "django-frontend",
+ "name": "django-postgresql-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -125,11 +125,11 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "django-example"
+ "django-postgresql-example"
],
"from": {
"kind": "ImageStreamTag",
- "name": "django-example:latest"
+ "name": "django-postgresql-example:latest"
}
}
},
@@ -139,20 +139,20 @@
],
"replicas": 1,
"selector": {
- "name": "django-frontend"
+ "name": "django-postgresql-example"
},
"template": {
"metadata": {
- "name": "django-frontend",
+ "name": "django-postgresql-example",
"labels": {
- "name": "django-frontend"
+ "name": "django-postgresql-example"
}
},
"spec": {
"containers": [
{
- "name": "django-example",
- "image": "django-example",
+ "name": "django-postgresql-example",
+ "image": "django-postgresql-example",
"ports": [
{
"containerPort": 8080
@@ -293,7 +293,7 @@
{
"name": "APPLICATION_DOMAIN",
"description": "The exposed hostname that will route to the Django service",
- "value": "django-example.openshiftapps.com"
+ "value": "django-postgresql-example.openshiftapps.com"
},
{
"name": "GITHUB_WEBHOOK_SECRET",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/django.json b/roles/openshift_examples/files/examples/quickstart-templates/django.json
index 74bbea163..1794d196c 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/django.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/django.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "django-frontend",
+ "name": "django-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "django-frontend"
+ "name": "django-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "django-route"
+ "name": "django-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "django-frontend"
+ "name": "django-example"
}
}
},
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "django-frontend",
+ "name": "django-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -139,13 +139,13 @@
],
"replicas": 1,
"selector": {
- "name": "django-frontend"
+ "name": "django-example"
},
"template": {
"metadata": {
- "name": "django-frontend",
+ "name": "django-example",
"labels": {
- "name": "django-frontend"
+ "name": "django-example"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
index cd9e5faf0..e281feecf 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-frontend",
+ "name": "nodejs-mongodb-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "nodejs-frontend"
+ "name": "nodejs-mongodb-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-route"
+ "name": "nodejs-mongodb-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "nodejs-frontend"
+ "name": "nodejs-mongodb-example"
}
}
},
@@ -53,7 +53,7 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-example",
+ "name": "nodejs-mongodb-example",
"annotations": {
"description": "Keeps track of changes in the application image"
}
@@ -63,7 +63,7 @@
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-example",
+ "name": "nodejs-mongodb-example",
"annotations": {
"description": "Defines how to build the application"
}
@@ -90,7 +90,7 @@
"output": {
"to": {
"kind": "ImageStreamTag",
- "name": "nodejs-example:latest"
+ "name": "nodejs-mongodb-example:latest"
}
},
"triggers": [
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-frontend",
+ "name": "nodejs-mongodb-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -125,11 +125,11 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "nodejs-example"
+ "nodejs-mongodb-example"
],
"from": {
"kind": "ImageStreamTag",
- "name": "nodejs-example:latest"
+ "name": "nodejs-mongodb-example:latest"
}
}
},
@@ -139,20 +139,20 @@
],
"replicas": 1,
"selector": {
- "name": "nodejs-frontend"
+ "name": "nodejs-mongodb-example"
},
"template": {
"metadata": {
- "name": "nodejs-frontend",
+ "name": "nodejs-mongodb-example",
"labels": {
- "name": "nodejs-frontend"
+ "name": "nodejs-mongodb-example"
}
},
"spec": {
"containers": [
{
- "name": "nodejs-example",
- "image": "nodejs-example",
+ "name": "nodejs-mongodb-example",
+ "image": "nodejs-mongodb-example",
"ports": [
{
"containerPort": 8080
@@ -289,7 +289,7 @@
{
"name": "APPLICATION_DOMAIN",
"description": "The exposed hostname that will route to the Node.js service",
- "value": "nodejs-example.openshiftapps.com"
+ "value": "nodejs-mongodb-example.openshiftapps.com"
},
{
"name": "GITHUB_WEBHOOK_SECRET",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
index ff7dd574e..dd38571eb 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-frontend",
+ "name": "nodejs-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "nodejs-frontend"
+ "name": "nodejs-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-route"
+ "name": "nodejs-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "nodejs-frontend"
+ "name": "nodejs-example"
}
}
},
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs-frontend",
+ "name": "nodejs-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -139,13 +139,13 @@
],
"replicas": 1,
"selector": {
- "name": "nodejs-frontend"
+ "name": "nodejs-example"
},
"template": {
"metadata": {
- "name": "nodejs-frontend",
+ "name": "nodejs-example",
"labels": {
- "name": "nodejs-frontend"
+ "name": "nodejs-example"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json
index ec7da77e3..3b83d3f5b 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,7 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "rails-frontend",
+ "name": "rails-postgresql-example",
"annotations": {
"description": "Exposes and load balances the application pods"
}
@@ -31,7 +31,7 @@
}
],
"selector": {
- "name": "rails-frontend"
+ "name": "rails-postgresql-example"
}
}
},
@@ -39,13 +39,13 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "rails-route"
+ "name": "rails-postgresql-example"
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
"to": {
"kind": "Service",
- "name": "rails-frontend"
+ "name": "rails-postgresql-example"
}
}
},
@@ -53,7 +53,7 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "rails-example",
+ "name": "rails-postgresql-example",
"annotations": {
"description": "Keeps track of changes in the application image"
}
@@ -63,7 +63,7 @@
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
- "name": "rails-example",
+ "name": "rails-postgresql-example",
"annotations": {
"description": "Defines how to build the application"
}
@@ -90,7 +90,7 @@
"output": {
"to": {
"kind": "ImageStreamTag",
- "name": "rails-example:latest"
+ "name": "rails-postgresql-example:latest"
}
},
"triggers": [
@@ -110,7 +110,7 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "rails-frontend",
+ "name": "rails-postgresql-example",
"annotations": {
"description": "Defines how to deploy the application server"
}
@@ -125,7 +125,7 @@
"command": [
"./migrate-database.sh"
],
- "containerName": "rails-example"
+ "containerName": "rails-postgresql-example"
}
}
}
@@ -136,11 +136,11 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "rails-example"
+ "rails-postgresql-example"
],
"from": {
"kind": "ImageStreamTag",
- "name": "rails-example:latest"
+ "name": "rails-postgresql-example:latest"
}
}
},
@@ -150,20 +150,20 @@
],
"replicas": 1,
"selector": {
- "name": "rails-frontend"
+ "name": "rails-postgresql-example"
},
"template": {
"metadata": {
- "name": "rails-frontend",
+ "name": "rails-postgresql-example",
"labels": {
- "name": "rails-frontend"
+ "name": "rails-postgresql-example"
}
},
"spec": {
"containers": [
{
- "name": "rails-example",
- "image": "rails-example",
+ "name": "rails-postgresql-example",
+ "image": "rails-postgresql-example",
"ports": [
{
"containerPort": 8080
@@ -328,7 +328,7 @@
{
"name": "APPLICATION_DOMAIN",
"description": "The exposed hostname that will route to the Rails service",
- "value": "rails-example.openshiftapps.com"
+ "value": "rails-postgresql-example.openshiftapps.com"
},
{
"name": "GITHUB_WEBHOOK_SECRET",
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json b/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json
index c0925b453..5cbc7ee7e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json
@@ -320,37 +320,37 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-amq-amqp",
+ "name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "name": "amqp-ssl",
"containerPort": 5671,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-mqtt",
+ "name": "mqtt",
"containerPort": 1883,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp",
+ "name": "stomp",
"containerPort": 61613,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "name": "stomp-ssl",
"containerPort": 61612,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp",
+ "name": "tcp",
"containerPort": 61616,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "name": "tcp-ssl",
"containerPort": 61617,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json b/roles/openshift_examples/files/examples/xpaas-templates/amq6.json
index 63ec9a235..7decdfe52 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/amq6.json
@@ -311,37 +311,37 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-amq-amqp",
+ "name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "name": "amqp-ssl",
"containerPort": 5671,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-mqtt",
+ "name": "mqtt",
"containerPort": 1883,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp",
+ "name": "stomp",
"containerPort": 61613,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "name": "stomp-ssl",
"containerPort": 61612,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp",
+ "name": "tcp",
"containerPort": 61616,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "name": "tcp-ssl",
"containerPort": 61617,
"protocol": "TCP"
}
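The AMQ port renames above replace parameterized names such as "${APPLICATION_NAME}-amq-amqp" with short fixed names ("amqp", "stomp-ssl", "tcp"). Container and service port names are validated as IANA service names, which, to the best of my knowledge, allows at most 15 lowercase alphanumeric characters and dashes; once ${APPLICATION_NAME} is substituted, the old names can easily exceed that limit and fail validation. A rough validator for that convention, my reading of the rule rather than code from this repository:

# rough check of the IANA_SVC_NAME convention as I understand it: 1-15 chars,
# lowercase letters/digits/dashes, at least one letter, no leading, trailing
# or consecutive dashes. Not taken from this repository.
import re

_PORT_NAME = re.compile(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$')

def valid_port_name(name):
    return (0 < len(name) <= 15
            and _PORT_NAME.match(name) is not None
            and '--' not in name
            and any(c.isalpha() for c in name))

for candidate in ("amqp", "tcp-ssl", "myapp-amq-stomp-ssl"):
    print(candidate, valid_port_name(candidate))
# "myapp-amq-stomp-ssl" is 19 characters once the parameter is substituted,
# so a name built from ${APPLICATION_NAME} can fail validation.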
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
index 0790d6546..b64acae8b 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
@@ -309,24 +309,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -397,17 +398,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -538,37 +539,37 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-amq-amqp",
+ "name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "name": "amqp-ssl",
"containerPort": 5671,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-mqtt",
+ "name": "mqtt",
"containerPort": 1883,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp",
+ "name": "stomp",
"containerPort": 61613,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "name": "stomp-ssl",
"containerPort": 61612,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp",
+ "name": "tcp",
"containerPort": 61616,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "name": "tcp-ssl",
"containerPort": 61617,
"protocol": "TCP"
}
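Two changes repeat across the xPaaS templates above and below: the BuildConfig output now points at an explicit ImageStreamTag, "${APPLICATION_NAME}:latest", instead of a bare name, and the trigger types use the capitalized v1 spellings GitHub, Generic and ImageChange. A hypothetical one-off helper that applies the same normalization to a template loaded as a Python dict, purely illustrative and not part of this commit:

# illustrative only: normalize legacy trigger spellings and bare build output
# names in a template already loaded as a dict, mirroring the hand edits in
# this diff. The mapping below is inferred from the replacements shown here.
# usage (assumed): template = normalize(json.load(open("some-template.json")))
TRIGGER_TYPES = {"github": "GitHub", "generic": "Generic", "imageChange": "ImageChange"}

def normalize(node):
    if isinstance(node, list):
        return [normalize(item) for item in node]
    if isinstance(node, dict):
        node = {key: normalize(value) for key, value in node.items()}
        if node.get("type") in TRIGGER_TYPES:
            node["type"] = TRIGGER_TYPES[node["type"]]
        output = node.get("output")
        if isinstance(output, dict):
            to = output.get("to")
            if isinstance(to, dict) and "name" in to and "kind" not in to:
                to["kind"] = "ImageStreamTag"
                if ":" not in to["name"]:
                    to["name"] += ":latest"
        return node
    return node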
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
index a0505f81e..20b234bd0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
@@ -304,24 +304,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -392,17 +393,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -533,37 +534,37 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-amq-amqp",
+ "name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "name": "amqp-ssl",
"containerPort": 5671,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-mqtt",
+ "name": "mqtt",
"containerPort": 1883,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp",
+ "name": "stomp",
"containerPort": 61613,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "name": "stomp-ssl",
"containerPort": 61612,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp",
+ "name": "tcp",
"containerPort": 61616,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "name": "tcp-ssl",
"containerPort": 61617,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
index d894deda0..146bfb1ee 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
@@ -181,24 +181,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -261,12 +262,12 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
index 14585d60f..0497e6824 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
@@ -248,24 +248,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -336,17 +337,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
index 13d4289ae..289ab284f 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
@@ -328,24 +328,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -416,17 +417,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -568,7 +569,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mongodb-tcp-27017",
"containerPort": 27017,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
index 690b918a1..22b301aa9 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
@@ -323,24 +323,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -411,17 +412,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -563,7 +564,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mongodb-tcp-27017",
"containerPort": 27017,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
index 27062596f..648a53199 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
@@ -330,24 +330,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -418,17 +419,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -570,7 +571,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mysql-tcp-3306",
"containerPort": 3306,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
index 69537e697..83d5c8b18 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
@@ -325,24 +325,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -413,17 +414,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -565,7 +566,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mysql-tcp-3306",
"containerPort": 3306,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
index 1325e26de..53b953b7e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
@@ -318,24 +318,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -406,17 +407,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -558,7 +559,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-postgresql-tcp-5432",
"containerPort": 5432,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
index 83921c57e..9d660cb42 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
@@ -313,24 +313,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -401,17 +402,17 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-ping-8888",
+ "name": "ping",
"containerPort": 8888,
"protocol": "TCP"
}
@@ -553,7 +554,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-postgresql-tcp-5432",
"containerPort": 5432,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
index a3947593c..d74c2dfe3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
@@ -151,24 +151,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -231,7 +232,7 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
index 1f9c17cf2..b94142135 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
@@ -218,24 +218,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -306,12 +307,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
index 55746fc11..0c7b7d8e3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
@@ -299,24 +299,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -387,12 +388,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -522,7 +523,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mongodb-tcp-27017",
"containerPort": 27017,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
index f540c885c..892f27fe3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
@@ -294,24 +294,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -382,12 +383,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -517,7 +518,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mongodb-tcp-27017",
"containerPort": 27017,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
index 984b075a2..547449010 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
@@ -301,24 +301,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -389,12 +390,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -519,7 +520,6 @@
"image": "mysql",
"ports": [
{
- "name": "${APPLICATION_NAME}-mysql-tcp-3306",
"containerPort": 3306,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
index af9c7c17c..2ae59ec71 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
@@ -296,24 +296,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -384,12 +385,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -514,7 +515,6 @@
"image": "mysql",
"ports": [
{
- "name": "${APPLICATION_NAME}-mysql-tcp-3306",
"containerPort": 3306,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
index ec59a6a5b..b871b48d0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
@@ -289,24 +289,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -377,12 +378,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -507,7 +508,6 @@
"image": "postgresql",
"ports": [
{
- "name": "${APPLICATION_NAME}-postgresql-tcp-5432",
"containerPort": 5432,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
index 6040f7208..384ff1b8f 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
@@ -284,24 +284,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -372,12 +373,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -502,7 +503,6 @@
"image": "postgresql",
"ports": [
{
- "name": "${APPLICATION_NAME}-postgresql-tcp-5432",
"containerPort": 5432,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
index 25b6497b2..3c7812b69 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
@@ -151,24 +151,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -231,7 +232,7 @@
},
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
index af4c94ae0..d725e0606 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
@@ -218,24 +218,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -306,12 +307,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
index b5975b646..cf35d0024 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
@@ -299,24 +299,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -387,12 +388,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -522,7 +523,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mongodb-tcp-27017",
"containerPort": 27017,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
index 8145ee34e..a993024f4 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
@@ -294,24 +294,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -382,12 +383,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -517,7 +518,6 @@
"imagePullPolicy": "Always",
"ports": [
{
- "name": "${APPLICATION_NAME}-mongodb-tcp-27017",
"containerPort": 27017,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
index b0808949c..0692817bf 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
@@ -301,24 +301,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -389,12 +390,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -519,7 +520,6 @@
"image": "mysql",
"ports": [
{
- "name": "${APPLICATION_NAME}-mysql-tcp-3306",
"containerPort": 3306,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
index 46ad18b91..226a983b7 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
@@ -296,24 +296,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -384,12 +385,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -514,7 +515,6 @@
"image": "mysql",
"ports": [
{
- "name": "${APPLICATION_NAME}-mysql-tcp-3306",
"containerPort": 3306,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
index 19cd2be30..b4644ac08 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
@@ -289,24 +289,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -377,12 +378,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -507,7 +508,6 @@
"image": "postgresql",
"ports": [
{
- "name": "${APPLICATION_NAME}-postgresql-tcp-5432",
"containerPort": 5432,
"protocol": "TCP"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
index 185808baf..b46f23225 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
@@ -284,24 +284,25 @@
},
"output": {
"to": {
- "name": "${APPLICATION_NAME}"
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
}
},
"triggers": [
{
- "type": "github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
},
{
- "type": "generic",
+ "type": "Generic",
"generic": {
"secret": "${GENERIC_TRIGGER_SECRET}"
}
},
{
- "type": "imageChange",
+ "type": "ImageChange",
"imageChange": {}
}
]
@@ -372,12 +373,12 @@
],
"ports": [
{
- "name": "${APPLICATION_NAME}-tcp-8080",
+ "name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
- "name": "${APPLICATION_NAME}-tcp-8443",
+ "name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
@@ -502,7 +503,6 @@
"image": "postgresql",
"ports": [
{
- "name": "${APPLICATION_NAME}-postgresql-tcp-5432",
"containerPort": 5432,
"protocol": "TCP"
}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 727861b07..4e0989c5f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -300,8 +300,7 @@ def set_registry_url_if_unset(facts):
if deployment_type == 'enterprise':
registry_url = "openshift3/ose-${component}:${version}"
elif deployment_type == 'online':
- registry_url = ("docker-registry.ops.rhcloud.com/"
- "openshift3/ose-${component}:${version}")
+ registry_url = ("openshift3/ose-${component}:${version}")
facts[role]['registry_url'] = registry_url
return facts
@@ -367,9 +366,11 @@ def set_url_facts_if_unset(facts):
console_path = facts['master']['console_path']
etcd_use_ssl = facts['master']['etcd_use_ssl']
etcd_hosts = facts['master']['etcd_hosts']
- etcd_port = facts['master']['etcd_port'],
+ etcd_port = facts['master']['etcd_port']
hostname = facts['common']['hostname']
public_hostname = facts['common']['public_hostname']
+ cluster_hostname = facts['master'].get('cluster_hostname')
+ cluster_public_hostname = facts['master'].get('cluster_public_hostname')
if 'etcd_urls' not in facts['master']:
etcd_urls = []
@@ -384,24 +385,51 @@ def set_url_facts_if_unset(facts):
etcd_port)]
facts['master']['etcd_urls'] = etcd_urls
if 'api_url' not in facts['master']:
- facts['master']['api_url'] = format_url(api_use_ssl, hostname,
+ api_hostname = cluster_hostname if cluster_hostname else hostname
+ facts['master']['api_url'] = format_url(api_use_ssl, api_hostname,
api_port)
if 'public_api_url' not in facts['master']:
+ api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
facts['master']['public_api_url'] = format_url(api_use_ssl,
- public_hostname,
+ api_public_hostname,
api_port)
if 'console_url' not in facts['master']:
+ console_hostname = cluster_hostname if cluster_hostname else hostname
facts['master']['console_url'] = format_url(console_use_ssl,
- hostname,
+ console_hostname,
console_port,
console_path)
if 'public_console_url' not in facts['master']:
+ console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
facts['master']['public_console_url'] = format_url(console_use_ssl,
- public_hostname,
+ console_public_hostname,
console_port,
console_path)
return facts
+def set_aggregate_facts(facts):
+ """ Set aggregate facts
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with aggregated facts
+ """
+ all_hostnames = set()
+ if 'common' in facts:
+ all_hostnames.add(facts['common']['hostname'])
+ all_hostnames.add(facts['common']['public_hostname'])
+
+ if 'master' in facts:
+ if 'cluster_hostname' in facts['master']:
+ all_hostnames.add(facts['master']['cluster_hostname'])
+ if 'cluster_public_hostname' in facts['master']:
+ all_hostnames.add(facts['master']['cluster_public_hostname'])
+
+ facts['common']['all_hostnames'] = list(all_hostnames)
+
+ return facts
+
def set_sdn_facts_if_unset(facts):
""" Set sdn facts if not already present in facts dict
@@ -675,6 +703,7 @@ class OpenShiftFacts(object):
facts = set_identity_providers_if_unset(facts)
facts = set_registry_url_if_unset(facts)
facts = set_sdn_facts_if_unset(facts)
+ facts = set_aggregate_facts(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
@@ -713,7 +742,7 @@ class OpenShiftFacts(object):
session_name='ssn', session_secrets_file='',
access_token_max_seconds=86400,
auth_token_max_seconds=500,
- oauth_grant_method='auto')
+ oauth_grant_method='auto', cluster_defer_ha=False)
defaults['master'] = master
if 'node' in roles:
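In openshift_facts.py above, the URL facts now prefer the clustered hostnames when they are set (cluster_hostname for the internal API and console URLs, cluster_public_hostname for the public ones) and fall back to the per-host values otherwise, while the new set_aggregate_facts() collects every known name into openshift.common.all_hostnames, which the certificate tasks later join into --hostnames. A minimal sketch of that fallback and aggregation with made-up sample values follows; format_url here is a stand-in, not the module's actual helper.

# minimal sketch of the fallback and aggregation added above, with made-up
# sample values; format_url below is a stand-in, not the module's helper.
def format_url(use_ssl, hostname, port, path=''):
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname if str(port) in ('80', '443') else '%s:%s' % (hostname, port)
    return '%s://%s%s' % (scheme, netloc, path)

facts = {
    'common': {'hostname': 'master1.example.com',
               'public_hostname': 'master1.public.example.com'},
    'master': {'cluster_hostname': 'openshift-internal.example.com',
               'cluster_public_hostname': 'openshift.example.com',
               'api_port': '8443', 'api_use_ssl': True},
}

# cluster_hostname wins when set, otherwise the per-host hostname is used
cluster_hostname = facts['master'].get('cluster_hostname')
api_hostname = cluster_hostname if cluster_hostname else facts['common']['hostname']
print(format_url(facts['master']['api_use_ssl'], api_hostname, facts['master']['api_port']))
# https://openshift-internal.example.com:8443

# set_aggregate_facts() collects every name; the CA tasks join them for
# create-master-certs --hostnames=...
all_hostnames = {facts['common']['hostname'], facts['common']['public_hostname'],
                 facts['master']['cluster_hostname'],
                 facts['master']['cluster_public_hostname']}
print(','.join(sorted(all_hostnames)))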
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index d71e6d019..b2cda3a85 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -4,6 +4,7 @@
that:
- ansible_version | version_compare('1.8.0', 'ge')
- ansible_version | version_compare('1.9.0', 'ne')
+ - ansible_version | version_compare('1.9.0.1', 'ne')
- name: Gather OpenShift facts
openshift_facts:
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index d17f3f532..74e702248 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -16,3 +16,10 @@
command: >
{{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true
with_items: openshift_scheduleable_nodes
+
+- name: Label nodes
+ command: >
+ {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }}
+ with_items:
+ - "{{ openshift_node_vars }}"
+ when: "'labels' in item.openshift.node and item.openshift.node.labels != {}"
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 3178e318c..19f77d145 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -7,7 +7,7 @@ Requirements
------------
A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extras-rpms, and rhel-server-7-ose-beta-rpms repos.
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 11195e83e..ca8860099 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -15,6 +15,12 @@ os_firewall_allow:
port: 24224/tcp
- service: Fluentd td-agent udp
port: 24224/udp
+- service: pcsd
+ port: 2224/tcp
+- service: Corosync UDP
+ port: 5404/udp
+- service: Corosync UDP
+ port: 5405/udp
os_firewall_deny:
- service: OpenShift api http
port: 8080/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 6fd4dfb51..f1e7e1ab3 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,3 +1,4 @@
---
- name: restart openshift-master
service: name=openshift-master state=restarted
+ when: not openshift_master_ha | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 02905f32d..3ee21b902 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -8,6 +8,10 @@
- openshift_master_oauth_grant_method in openshift_master_valid_grant_methods
when: openshift_master_oauth_grant_method is defined
+- fail:
+ msg: "openshift_master_cluster_password must be set for multi-master installations"
+ when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
+
- name: Install OpenShift Master package
yum: pkg=openshift-master state=present
register: install_result
@@ -16,6 +20,9 @@
openshift_facts:
role: master
local_facts:
+ cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+ cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+ cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
@@ -46,6 +53,11 @@
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
+ default_subdomain: "{{ osm_default_subdomain | default(None) }}"
+ custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
+ default_node_selector: "{{ osm_default_node_selector | default(None) }}"
+ api_server_args: "{{ osm_api_server_args | default(None) }}"
+ controller_args: "{{ osm_controller_args | default(None) }}"
# TODO: These values need to be configurable
- name: Set dns OpenShift facts
@@ -114,12 +126,26 @@
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
+ when: not openshift_master_ha | bool
register: start_result
- name: pause to prevent service restart from interfering with bootstrapping
pause: seconds=30
when: start_result | changed
+- name: Install cluster packages
+ yum: pkg=pcs state=present
+ when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ register: install_result
+
+- name: Start and enable cluster service
+ service: name=pcsd enabled=yes state=started
+ when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+
+- name: Set the cluster user password
+ shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
+ when: install_result | changed
+
- name: Create the OpenShift client config dir(s)
file:
path: "~{{ item }}/.kube"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index bc766ec9b..44567aa22 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -2,6 +2,9 @@ apiLevels:
- v1beta3
- v1
apiVersion: v1
+{% if api_server_args is defined and api_server_args %}
+apiServerArguments: {{ api_server_args }}
+{% endif %}
assetConfig:
logoutURL: ""
masterPublicURL: {{ openshift.master.public_api_url }}
@@ -13,11 +16,16 @@ assetConfig:
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if controller_args is defined and controller_args %}
+controllerArguments: {{ controller_args }}
+{% endif %}
corsAllowedOrigins:
-{# TODO: add support for user specified corsAllowedOrigins #}
{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
- {{ origin }}
{% endfor %}
+{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
+ - {{ custom_origin }}
+{% endfor %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
@@ -93,7 +101,7 @@ policyConfig:
openshiftSharedResourcesNamespace: openshift
{# TODO: Allow users to override projectConfig items #}
projectConfig:
- defaultNodeSelector: ""
+ defaultNodeSelector: "{{ openshift.master.default_node_selector | default("") }}"
projectRequestMessage: ""
projectRequestTemplate: ""
securityAllocator:
@@ -101,12 +109,13 @@ projectConfig:
mcsLabelsPerProject: 5
uidAllocatorRange: 1000000000-1999999999/10000
routingConfig:
- subdomain: router.default.local
+ subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
managedNames:
- default
- builder
- deployer
+ masterCA: ca.crt
privateKeyFile: serviceaccounts.private.key
publicKeyFiles:
- serviceaccounts.public.key
diff --git a/roles/openshift_master/templates/scheduler.json.j2 b/roles/openshift_master/templates/scheduler.json.j2
index 833e7f3e1..835f2383e 100644
--- a/roles/openshift_master/templates/scheduler.json.j2
+++ b/roles/openshift_master/templates/scheduler.json.j2
@@ -1,5 +1,6 @@
{
"predicates": [
+ {"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsPorts"},
{"name": "NoDiskConflict"},
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
index f6fd88c65..4ca644876 100644
--- a/roles/openshift_master/templates/v1_partials/oauthConfig.j2
+++ b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
@@ -10,6 +10,20 @@
{{ key }}: {{ identity_provider[key] }}"
{% endif %}
{% endfor %}
+{% elif identity_provider.kind == 'LDAPPasswordIdentityProvider' %}
+ attributes:
+{% for attribute_key in identity_provider.attributes %}
+ {{ attribute_key }}:
+{% for attribute_value in identity_provider.attributes[attribute_key] %}
+ - {{ attribute_value }}
+{% endfor %}
+{% endfor %}
+{% for key in ('bindDN', 'bindPassword', 'ca') %}
+ {{ key }}: "{{ identity_provider[key] }}"
+{% endfor %}
+{% for key in ('insecure', 'url') %}
+ {{ key }}: {{ identity_provider[key] }}
+{% endfor %}
{% elif identity_provider.kind == 'RequestHeaderIdentityProvider' %}
headers: {{ identity_provider.headers }}
{% if 'clientCA' in identity_provider %}
diff --git a/roles/openshift_master_ca/meta/main.yml b/roles/openshift_master_ca/meta/main.yml
index f3236e850..0c8881521 100644
--- a/roles/openshift_master_ca/meta/main.yml
+++ b/roles/openshift_master_ca/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 8163ecd7f..03eb7e15f 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -14,7 +14,7 @@
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }}
+ --hostnames={{ openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_master_config_dir }} --overwrite=false
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index b5a3f8e40..bb23cf4f4 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -7,14 +7,36 @@
with_items: masters_needing_certs
- file:
- src: "{{ openshift_master_ca_cert }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/ca.crt"
- with_items: masters_needing_certs
+ src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
+ dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: hard
+ with_nested:
+ - masters_needing_certs
+ - - ca.crt
+ - ca.key
+ - ca.serial.txt
+ - admin.crt
+ - admin.key
+ - admin.kubeconfig
+ - master.kubelet-client.crt
+ - master.kubelet-client.key
+ - openshift-master.crt
+ - openshift-master.key
+ - openshift-master.kubeconfig
+ - openshift-registry.crt
+ - openshift-registry.key
+ - openshift-registry.kubeconfig
+ - openshift-router.crt
+ - openshift-router.key
+ - openshift-router.kubeconfig
+ - serviceaccounts.private.key
+ - serviceaccounts.public.key
+
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ item.openshift.common.hostname }},{{ item.openshift.common.public_hostname }}
+ --hostnames={{ item.openshift.common.all_hostnames | join(',') }}
--master={{ item.openshift.master.api_url }}
--public-master={{ item.openshift.master.public_api_url }}
--cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
@@ -22,3 +44,5 @@
args:
creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt"
with_items: masters_needing_certs
+
+
diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml
index 6e577b13b..6214f7918 100644
--- a/roles/openshift_master_certificates/vars/main.yml
+++ b/roles/openshift_master_certificates/vars/main.yml
@@ -1,6 +1,3 @@
---
openshift_generated_configs_dir: /etc/openshift/generated-configs
openshift_master_config_dir: /etc/openshift/master
-openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
-openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
-openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
new file mode 100644
index 000000000..f150981fa
--- /dev/null
+++ b/roles/openshift_master_cluster/README.md
@@ -0,0 +1,34 @@
+OpenShift Master Cluster
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
new file mode 100644
index 000000000..0c8881521
--- /dev/null
+++ b/roles/openshift_master_cluster/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_repos }
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
new file mode 100644
index 000000000..8ddc8bfda
--- /dev/null
+++ b/roles/openshift_master_cluster/tasks/configure.yml
@@ -0,0 +1,44 @@
+---
+- fail:
+ msg: This role requires that openshift_master_cluster_vip is set
+ when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
+- fail:
+ msg: This role requires that openshift_master_cluster_public_vip is set
+ when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
+
+- name: Authenticate to the cluster
+ command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
+
+- name: Create the cluster
+ command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
+
+- name: Start the cluster
+ command: pcs cluster start --all
+
+- name: Enable the cluster on all nodes
+ command: pcs cluster enable --all
+
+- name: Set default resource stickiness
+ command: pcs resource defaults resource-stickiness=100
+
+- name: Add the cluster VIP resource
+ command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group openshift-master
+
+- name: Add the cluster public VIP resource
+ command: pcs resource create virtual-ip-public IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group openshift-master
+ when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
+
+- name: Add the cluster openshift-master service resource
+ command: pcs resource create master systemd:openshift-master op start timeout=90s stop timeout=90s --group openshift-master
+
+- name: Disable stonith
+ command: pcs property set stonith-enabled=false
+
+# TODO: handle case where api port is not 8443
+- name: Wait for the clustered master service to be available
+ wait_for:
+ host: "{{ openshift_master_cluster_vip }}"
+ port: 8443
+ state: started
+ timeout: 180
+ delay: 90
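The final `wait_for` task simply polls the cluster VIP until the API port accepts TCP connections. A rough standalone equivalent in Python, assuming the same port (8443) and an illustrative VIP address:

```
# Sketch of the wait_for behaviour: sleep for the initial delay, then retry a
# TCP connect to the VIP on port 8443 until it succeeds or the timeout expires.
import socket
import time

def wait_for_port(host, port, timeout=180, delay=90, interval=5):
    time.sleep(delay)                          # wait_for's 'delay' before the first check
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=interval):
                return True                    # port is accepting connections
        except OSError:
            time.sleep(interval)
    return False

# wait_for_port("192.0.2.10", 8443)            # hypothetical VIP
```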
diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml
new file mode 100644
index 000000000..a80b6c5b4
--- /dev/null
+++ b/roles/openshift_master_cluster/tasks/configure_deferred.yml
@@ -0,0 +1,8 @@
+---
+- debug: msg="Deferring config"
+
+- name: Start and enable openshift-master
+ service:
+ name: openshift-master
+ state: started
+ enabled: yes
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
new file mode 100644
index 000000000..315947183
--- /dev/null
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: Test if cluster is already configured
+ command: pcs status
+ register: pcs_status
+ changed_when: false
+ failed_when: false
+ when: not openshift.master.cluster_defer_ha | bool
+
+- include: configure.yml
+ when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
+
+- include: configure_deferred.yml
+ when: openshift.master.cluster_defer_ha | bool
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index c3c17b848..300e6b495 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -9,7 +9,7 @@ Requirements
One or more OpenShift Master servers.
A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extras-rpms, and rhel-server-7-ose-beta-rpms repos.
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
@@ -34,6 +34,18 @@ openshift_common
Example Playbook
----------------
+Notes
+-----
+
+Currently we support re-labeling nodes, but we do not re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, follow the steps below:
+
+```
+oadm manage-node --schedulable=false ${NODE}
+oadm manage-node --evacuate ${NODE}
+oadm manage-node --schedulable=true ${NODE}
+```
+
+
TODO
License
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index f9c3d10e9..adffca252 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -36,6 +36,7 @@
registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
@@ -76,3 +77,8 @@
- name: Start and enable openshift-node
service: name=openshift-node enabled=yes state=started
+ register: start_result
+
+- name: pause to prevent service restart from interfering with bootstrapping
+ pause: seconds=30
+ when: start_result | changed
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7778a2a61..e6f75a4c0 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -8,6 +8,9 @@ imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
kind: NodeConfig
+{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}
+kubeletArguments: {{ openshift.node.kubelet_args }}
+{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
nodeName: {{ openshift.common.hostname }}
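To see what the new `kubeletArguments` block renders to, the snippet can be fed a sample fact dict. A minimal sketch, assuming the `jinja2` package and an illustrative `kubelet_args` value (the real value comes from `openshift_node_kubelet_args`):

```
# Render only the kubeletArguments portion of node.yaml.v1.j2 with sample facts.
from jinja2 import Template

snippet = (
    "{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}"
    "kubeletArguments: {{ openshift.node.kubelet_args }}\n"
    "{% endif %}"
)
facts = {"openshift": {"node": {"kubelet_args": {"max-pods": ["40"]}}}}
print(Template(snippet).render(**facts), end="")
# kubeletArguments: {'max-pods': ['40']}
```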
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 64a799dfb..57f71887b 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -25,7 +25,7 @@
command: >
{{ openshift.common.admin_binary }} create-server-cert
--cert=server.crt --key=server.key --overwrite=true
- --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
+ --hostnames={{ item.openshift.common.all_hostnames |join(",") }}
--signer-cert={{ openshift_master_ca_cert }}
--signer-key={{ openshift_master_ca_key }}
--signer-serial={{ openshift_master_ca_serial }}
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 6bbedd839..95b155b29 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -7,7 +7,7 @@ Requirements
------------
A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos.
+rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
diff --git a/roles/os_zabbix/library/__init__.py b/roles/os_zabbix/library/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/os_zabbix/library/__init__.py
diff --git a/roles/os_zabbix/library/test.yml b/roles/os_zabbix/library/test.yml
new file mode 100644
index 000000000..f585bcbb2
--- /dev/null
+++ b/roles/os_zabbix/library/test.yml
@@ -0,0 +1,92 @@
+---
+# This is a test playbook that creates one of each of the zabbix ansible module objects.
+# Ensure the zbxapi library (openshift_tools.monitoring.zbxapi) is installed, then run:
+# ansible-playbook test.yml
+- name: Test zabbix ansible module
+ hosts: localhost
+ gather_facts: no
+ vars:
+ zbx_server: http://localhost/zabbix/api_jsonrpc.php
+ zbx_user: Admin
+ zbx_password: zabbix
+
+ pre_tasks:
+ - name: Create a template
+ zbx_template:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'test template'
+ register: template_output
+
+ - debug: var=template_output
+
+ - name: Create an item
+ zbx_item:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'test item'
+ key: 'kenny.item.1'
+ template_name: "{{ template_output.results[0].host }}"
+ register: item_output
+
+ - debug: var=item_output
+
+ - name: Create a trigger
+ zbx_trigger:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ expression: '{test template:kenny.item.1.last()}>2'
+ desc: 'Kenny desc'
+ register: trigger_output
+
+ - debug: var=trigger_output
+
+ - name: Create a hostgroup
+ zbx_hostgroup:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'kenny hostgroup'
+ register: hostgroup_output
+
+ - debug: var=hostgroup_output
+
+ - name: Create a host
+ zbx_host:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'kenny host'
+ hostgroups:
+ - 'kenny hostgroup'
+ register: host_output
+
+ - debug: var=host_output
+
+ - name: Create a usergroup
+ zbx_usergroup:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: kenny usergroup
+ rights:
+ - 'kenny hostgroup': rw
+ register: usergroup_output
+
+ - debug: var=usergroup_output
+
+ - name: Create a user
+ zbx_user:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ alias: kenny user
+ passwd: zabbix
+ usergroups:
+ - kenny usergroup
+ register: user_output
+
+ - debug: var=user_output
diff --git a/roles/os_zabbix/library/zbx_host.py b/roles/os_zabbix/library/zbx_host.py
new file mode 100644
index 000000000..d75dfdea1
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_host.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+'''
+Zabbix host ansible module
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_group_ids(zapi, hostgroup_names):
+ '''
+ get hostgroups
+ '''
+ # Fetch groups by name
+ group_ids = []
+ for hgr in hostgroup_names:
+ content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}})
+ if content.has_key('result'):
+ group_ids.append({'groupid': content['result'][0]['groupid']})
+
+ return group_ids
+
+def get_template_ids(zapi, template_names):
+ '''
+ get related templates
+ '''
+ template_ids = []
+ # Fetch templates by name
+ for template_name in template_names:
+ content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
+ if content.has_key('result'):
+ template_ids.append({'templateid': content['result'][0]['templateid']})
+ return template_ids
+
+def main():
+ '''
+ Ansible module for zabbix host
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ hostgroup_names=dict(default=[], type='list'),
+ template_names=dict(default=[], type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ interfaces=dict(default=[], type='list'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'host'
+ idname = "hostid"
+ hname = module.params['name']
+ state = module.params['state']
+
+ # selectInterfaces doesn't appear to be working but is needed.
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'host': hname},
+ 'selectGroups': 'groupid',
+ 'selectParentTemplates': 'templateid',
+ 'selectInterfaces': 'interfaceid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'host': hname,
+ 'groups': get_group_ids(zapi, module.params['hostgroup_names']),
+ 'templates': get_template_ids(zapi, module.params['template_names']),
+ 'interfaces': module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent
+ 'main': 1, # default interface, 1 = true
+ 'useip': 1, # connect via IP rather than DNS, 1 = true
+ 'ip': '127.0.0.1', # IP address to use for the interface
+ 'dns': '', # dns name for the host
+ 'port': '10050', # zabbix agent port
+ }]
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'templates' and zab_results.has_key('parentTemplates'):
+ if zab_results['parentTemplates'] != value:
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
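All of the zbx_* modules added in this commit (zbx_host above and the ones that follow) share the same `state: present` flow: fetch the object, create it when it does not exist, otherwise compare the desired params against what Zabbix returned and send only the changed keys (plus the object id) to an `update` call. A condensed sketch of that compare step, with made-up sample data rather than a live Zabbix response:

```
# Condensed version of the compare-and-update logic repeated in the zbx_* modules.
def compute_differences(zab_results, params, idname):
    """Return only the keys whose desired value differs from Zabbix's copy."""
    differences = {}
    for key, value in params.items():
        # Zabbix returns most scalar values as strings, so compare both forms.
        if zab_results.get(key) != value and zab_results.get(key) != str(value):
            differences[key] = value
    if differences:
        # The object id is added so the API knows which object to update.
        differences[idname] = zab_results[idname]
    return differences

zab_results = {"hostid": "10105", "host": "kenny host", "status": "1"}  # sample data
params = {"host": "kenny host", "status": 0}
print(compute_differences(zab_results, params, "hostid"))
# {'status': 0, 'hostid': '10105'}
```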
diff --git a/roles/os_zabbix/library/zbx_hostgroup.py b/roles/os_zabbix/library/zbx_hostgroup.py
new file mode 100644
index 000000000..a1eb875d4
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_hostgroup.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+''' Ansible module for hostgroup
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix hostgroup ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def main():
+ ''' ansible module for hostgroup
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'hostgroup'
+ idname = "groupid"
+ hname = module.params['name']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'name': hname},
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'name': hname}
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py
new file mode 100644
index 000000000..57ec06463
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_item.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix items
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix item ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_value_type(value_type):
+ '''
+ Possible values:
+ 0 - numeric float;
+ 1 - character;
+ 2 - log;
+ 3 - numeric unsigned;
+ 4 - text
+ '''
+ vtype = 0
+ if 'int' in value_type:
+ vtype = 3
+ elif 'char' in value_type:
+ vtype = 1
+ elif 'str' in value_type:
+ vtype = 4
+
+ return vtype
+
+def main():
+ '''
+ ansible zabbix module for zbx_item
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ key=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
+ zabbix_type=dict(default=2, type='int'),
+ value_type=dict(default='int', type='str'),
+ applications=dict(default=[], type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'item'
+ idname = "itemid"
+ state = module.params['state']
+ key = module.params['key']
+ template_name = module.params['template_name']
+
+ content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
+ templateid = None
+ if content['result']:
+ templateid = content['result'][0]['templateid']
+ else:
+ module.exit_json(changed=False,
+ results='Error: Could not find a template named %s for the item.' % template_name,
+ state="unknown")
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'key_': key},
+ 'selectApplications': 'applicationid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'name': module.params['name'] or module.params['key'],
+ 'key_': key,
+ 'hostid': templateid,
+ 'type': module.params['zabbix_type'],
+ 'value_type': get_value_type(module.params['value_type']),
+ 'applications': module.params['applications'],
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_mediatype.py b/roles/os_zabbix/library/zbx_mediatype.py
new file mode 100644
index 000000000..a49aecd0f
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_mediatype.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+'''
+ Ansible module for mediatype
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix mediatype ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+def get_mtype(mtype):
+ '''
+ Transport used by the media type.
+ Possible values:
+ 0 - email;
+ 1 - script;
+ 2 - SMS;
+ 3 - Jabber;
+ 100 - Ez Texting.
+ '''
+ mtype = mtype.lower()
+ media_type = None
+ if mtype == 'script':
+ media_type = 1
+ elif mtype == 'sms':
+ media_type = 2
+ elif mtype == 'jabber':
+ media_type = 3
+ elif mtype == 'ez texting':
+ media_type = 100
+ else:
+ media_type = 0
+
+ return media_type
+
+def main():
+ '''
+ Ansible zabbix module for mediatype
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
+ mtype=dict(default=None, type='str'),
+ smtp_server=dict(default=None, type='str'),
+ smtp_helo=dict(default=None, type='str'),
+ smtp_email=dict(default=None, type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'mediatype'
+ idname = "mediatypeid"
+ description = module.params['description']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}})
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'description': description,
+ 'type': get_mtype(module.params['mtype']),
+ 'smtp_server': module.params['smtp_server'],
+ 'smtp_helo': module.params['smtp_helo'],
+ 'smtp_email': module.params['smtp_email'],
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if zab_results[key] != value and \
+ zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
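`get_mtype` above maps a human-readable transport name onto the numeric code the Zabbix `mediatype` API expects. A dict-based sketch of the same mapping, with the codes taken from the function's docstring and email as the fallback:

```
# Equivalent lookup for the media type codes documented in get_mtype.
MEDIA_TYPE_CODES = {
    "email": 0,
    "script": 1,
    "sms": 2,
    "jabber": 3,
    "ez texting": 100,
}

def media_type_code(name):
    """Return the Zabbix media type code for a transport name, defaulting to email."""
    return MEDIA_TYPE_CODES.get(name.lower(), 0)

assert media_type_code("SMS") == 2
assert media_type_code("something-else") == 0
```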
diff --git a/roles/os_zabbix/library/zbx_template.py b/roles/os_zabbix/library/zbx_template.py
new file mode 100644
index 000000000..676fa7e49
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_template.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+'''
+Ansible module for template
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix template ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def main():
+ ''' Ansible module for template
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zbc = ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])
+ zapi = ZabbixAPI(zbc)
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'template'
+ idname = 'templateid'
+ tname = module.params['name']
+ state = module.params['state']
+ # get a template, see if it exists
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'host': tname},
+ 'selectParentTemplates': 'templateid',
+ 'selectGroups': 'groupid',
+ #'selectApplications': extend,
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'groups': module.params.get('groups', [{'groupid': '1'}]),
+ 'host': tname,
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if key == 'templates' and zab_results.has_key('parentTemplates'):
+ if zab_results['parentTemplates'] != value:
+ differences[key] = value
+ elif zab_results[key] != str(value) and zab_results[key] != value:
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=content['result'], state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_trigger.py b/roles/os_zabbix/library/zbx_trigger.py
new file mode 100644
index 000000000..7cc9356c8
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_trigger.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+'''
+ansible module for zabbix triggers
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix trigger ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+
+def get_priority(priority):
+ ''' determine priority
+ '''
+ prior = 0
+ if 'info' in priority:
+ prior = 1
+ elif 'warn' in priority:
+ prior = 2
+ elif 'avg' == priority or 'ave' in priority:
+ prior = 3
+ elif 'high' in priority:
+ prior = 4
+ elif 'dis' in priority:
+ prior = 5
+
+ return prior
+
+def get_deps(zapi, deps):
+ ''' get trigger dependencies
+ '''
+ results = []
+ for desc in deps:
+ content = zapi.get_content('trigger',
+ 'get',
+ {'search': {'description': desc},
+ 'expandExpression': True,
+ 'selectDependencies': 'triggerid',
+ })
+ if content.has_key('result'):
+ results.append({'triggerid': content['result'][0]['triggerid']})
+
+ return results
+
+def main():
+ '''
+ Create a trigger in zabbix
+
+ Example:
+ "params": {
+ "description": "Processor load is too high on {HOST.NAME}",
+ "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5",
+ "dependencies": [
+ {
+ "triggerid": "14062"
+ }
+ ]
+ },
+
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ expression=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
+ dependencies=dict(default=[], type='list'),
+ priority=dict(default='avg', type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'trigger'
+ idname = "triggerid"
+ state = module.params['state']
+ description = module.params['description']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'description': description},
+ 'expandExpression': True,
+ 'selectDependencies': 'triggerid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'description': description,
+ 'expression': module.params['expression'],
+ 'dependencies': get_deps(zapi, module.params['dependencies']),
+ 'priority': get_priority(module.params['priority']),
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py
new file mode 100644
index 000000000..489023407
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_user.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+'''
+ansible module for zabbix users
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix user ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_usergroups(zapi, usergroups):
+ ''' Get usergroups
+ '''
+ ugroups = []
+ for ugr in usergroups:
+ content = zapi.get_content('usergroup',
+ 'get',
+ {'search': {'name': ugr},
+ #'selectUsers': 'userid',
+ #'getRights': 'extend'
+ })
+ if content['result']:
+ ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
+
+ return ugroups
+
+def main():
+ '''
+ ansible zabbix module for users
+ '''
+
+ ##def user(self, name, state='present', params=None):
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ alias=dict(default=None, type='str'),
+ passwd=dict(default=None, type='str'),
+ usergroups=dict(default=None, type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ password = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zbc = ZabbixConnection(module.params['server'], user, password, module.params['debug'])
+ zapi = ZabbixAPI(zbc)
+
+ ## before we can create a user media and users with media types we need media
+ zbx_class_name = 'user'
+ idname = "userid"
+ alias = module.params['alias']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'output': 'extend',
+ 'search': {'alias': alias},
+ "selectUsrgrps": 'usergrpid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'alias': alias,
+ 'passwd': module.params['passwd'],
+ 'usrgrps': get_usergroups(zapi, module.params['usergroups']),
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if key == 'passwd':
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_usergroup.py b/roles/os_zabbix/library/zbx_usergroup.py
new file mode 100644
index 000000000..ede4c9df1
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_usergroup.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+'''
+zabbix ansible module for usergroups
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix usergroup ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_rights(zapi, rights):
+ '''Get rights
+ '''
+ perms = []
+ for right in rights:
+ hstgrp = right.keys()[0]
+ perm = right.values()[0]
+ content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}})
+ if content['result']:
+ permission = 0
+ if perm == 'ro':
+ permission = 2
+ elif perm == 'rw':
+ permission = 3
+ perms.append({'id': content['result'][0]['groupid'],
+ 'permission': permission})
+ return perms
+
+def get_userids(zapi, users):
+ ''' Get userids from user aliases
+ '''
+ userids = []
+ for alias in users:
+ content = zapi.get_content('user', 'get', {'search': {'alias': alias}})
+ if content['result']:
+ userids.append(content['result'][0]['userid'])
+
+ return userids
+
+def main():
+ ''' Ansible module for usergroup
+ '''
+
+ ##def usergroup(self, name, rights=None, users=None, state='present', params=None):
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ rights=dict(default=[], type='list'),
+ users=dict(default=[], type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ zbx_class_name = 'usergroup'
+ idname = "usrgrpid"
+ uname = module.params['name']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'name': uname},
+ 'selectUsers': 'userid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'name': uname,
+ 'rights': get_rights(zapi, module.params['rights']),
+ 'userids': get_userids(zapi, module.params['users']),
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if key == 'rights':
+ differences['rights'] = value
+
+ elif key == 'userids' and zab_results.has_key('users'):
+ if zab_results['users'] != value:
+ differences['userids'] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
deleted file mode 100755
index 48f294938..000000000
--- a/roles/os_zabbix/library/zbxapi.py
+++ /dev/null
@@ -1,382 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
- ZabbixAPI ansible module
-'''
-
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Purpose: An ansible module to communicate with zabbix.
-#
-
-# pylint: disable=line-too-long
-# Disabling line length for readability
-
-import json
-import httplib2
-import sys
-import os
-import re
-import copy
-
-class ZabbixAPIError(Exception):
- '''
- ZabbixAPIError
- Exists to propagate errors up from the api
- '''
- pass
-
-class ZabbixAPI(object):
- '''
- ZabbixAPI class
- '''
- classes = {
- 'Action': ['create', 'delete', 'get', 'update'],
- 'Alert': ['get'],
- 'Application': ['create', 'delete', 'get', 'massadd', 'update'],
- 'Configuration': ['export', 'import'],
- 'Dcheck': ['get'],
- 'Dhost': ['get'],
- 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Dservice': ['get'],
- 'Event': ['acknowledge', 'get'],
- 'Graph': ['create', 'delete', 'get', 'update'],
- 'Graphitem': ['get'],
- 'Graphprototype': ['create', 'delete', 'get', 'update'],
- 'History': ['get'],
- 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
- 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'],
- 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
- 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Image': ['create', 'delete', 'get', 'update'],
- 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Maintenance': ['create', 'delete', 'get', 'update'],
- 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Mediatype': ['create', 'delete', 'get', 'update'],
- 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Screen': ['create', 'delete', 'get', 'update'],
- 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'],
- 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'],
- 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'],
- 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
- 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Templatescreenitem': ['get'],
- 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'],
- 'Triggerprototype': ['create', 'delete', 'get', 'update'],
- 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'],
- 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'],
- 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'],
- 'Usermedia': ['get'],
- }
-
- def __init__(self, data=None):
- if not data:
- data = {}
- self.server = data.get('server', None)
- self.username = data.get('user', None)
- self.password = data.get('password', None)
- if any([value == None for value in [self.server, self.username, self.password]]):
- print 'Please specify zabbix server url, username, and password.'
- sys.exit(1)
-
- self.verbose = data.get('verbose', False)
- self.use_ssl = data.has_key('use_ssl')
- self.auth = None
-
- for cname, _ in self.classes.items():
- setattr(self, cname.lower(), getattr(self, cname)(self))
-
- # pylint: disable=no-member
- # This method does not exist until the metaprogramming executed
- results = self.user.login(user=self.username, password=self.password)
-
- if results[0]['status'] == '200':
- if results[1].has_key('result'):
- self.auth = results[1]['result']
- elif results[1].has_key('error'):
- print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error'])
- sys.exit(1)
- else:
- print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
- sys.exit(1)
-
- def perform(self, method, rpc_params):
- '''
- This method calls your zabbix server.
-
- It requires the following parameters in order for a proper request to be processed:
- jsonrpc - the version of the JSON-RPC protocol used by the API;
- the Zabbix API implements JSON-RPC version 2.0;
- method - the API method being called;
- rpc_params - parameters that will be passed to the API method;
- id - an arbitrary identifier of the request;
- auth - a user authentication token; since we don't have one yet, it's set to null.
- '''
- http_method = "POST"
- jsonrpc = "2.0"
- rid = 1
-
- http = None
- if self.use_ssl:
- http = httplib2.Http()
- else:
- http = httplib2.Http(disable_ssl_certificate_validation=True,)
-
- headers = {}
- headers["Content-type"] = "application/json"
-
- body = {
- "jsonrpc": jsonrpc,
- "method": method,
- "params": rpc_params.get('params', {}),
- "id": rid,
- 'auth': self.auth,
- }
-
- if method in ['user.login', 'api.version']:
- del body['auth']
-
- body = json.dumps(body)
-
- if self.verbose:
- print body
- print method
- print headers
- httplib2.debuglevel = 1
-
- response, content = http.request(self.server, http_method, body, headers)
-
- if response['status'] not in ['200', '201']:
- raise ZabbixAPIError('Error calling zabbix. Zabbix returned %s' % response['status'])
-
- if self.verbose:
- print response
- print content
-
- try:
- content = json.loads(content)
- except ValueError as err:
- content = {"error": err.message}
-
- return response, content
-
- @staticmethod
- def meta(cname, method_names):
- '''
- This bit of metaprogramming is where the ZabbixAPI subclasses are created.
- For each of ZabbixAPI.classes we create a class from the key and methods
- from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
- to each subclass in order for each to be able to call the perform method.
- '''
- def meta_method(_class, method_name):
- '''
- This meta method allows a class to add methods to it.
- '''
- # This template method is a stub method for each of the subclass
- # methods.
- def template_method(self, params=None, **rpc_params):
- '''
- This template method is a stub method for each of the subclass methods.
- '''
- if params:
- rpc_params['params'] = params
- else:
- rpc_params['params'] = copy.deepcopy(rpc_params)
-
- return self.parent.perform(cname.lower()+"."+method_name, rpc_params)
-
- template_method.__doc__ = \
- "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % \
- (cname.lower(), method_name)
- template_method.__name__ = method_name
- # this is where the template method is placed inside of the subclass
- # e.g. setattr(User, "create", stub_method)
- setattr(_class, template_method.__name__, template_method)
-
- # This class call instantiates a subclass. e.g. User
- _class = type(cname,
- (object,),
- {'__doc__': \
- "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % cname.lower()})
- def __init__(self, parent):
- '''
- This init method gets placed inside of the _class
- to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
- is passed in to allow each class access to the perform method.
- '''
- self.parent = parent
-
- # This attaches the init to the subclass. e.g. Create
- setattr(_class, __init__.__name__, __init__)
- # For each of our ZabbixAPI.classes dict values
- # Create a method and attach it to our subclass.
- # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
- # 'update', 'iswritable', 'logout', 'addmedia', 'create',
- # 'login', 'deletemedia', 'isreadable'],
- # User.delete
- # User.get
- for method_name in method_names:
- meta_method(_class, method_name)
- # Return our subclass with all methods attached
- return _class
-
-# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
-for _class_name, _method_names in ZabbixAPI.classes.items():
- setattr(ZabbixAPI, _class_name, ZabbixAPI.meta(_class_name, _method_names))
-
-def exists(content, key='result'):
- ''' Check if key exists in content or the size of content[key] > 0
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def diff_content(from_zabbix, from_user, ignore=None):
- ''' Compare passed in object to results returned from zabbix
- '''
- terms = ['search', 'output', 'groups', 'select', 'expand', 'filter']
- if ignore:
- terms.extend(ignore)
- regex = '(' + '|'.join(terms) + ')'
- retval = {}
- for key, value in from_user.items():
- if re.findall(regex, key):
- continue
-
- # special case here for templates. You query templates and
- # the zabbix api returns parentTemplates. These will obviously fail.
- # So when its templates compare against parentTemplates.
- if key == 'templates' and from_zabbix.has_key('parentTemplates'):
- if from_zabbix['parentTemplates'] != value:
- retval[key] = value
-
- elif from_zabbix[key] != str(value):
- retval[key] = str(value)
-
- return retval
-
-def main():
- '''
- This main method runs the ZabbixAPI Ansible Module
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- user=dict(default=None, type='str'),
- password=dict(default=None, type='str'),
- zbx_class=dict(choices=ZabbixAPI.classes.keys()),
- params=dict(),
- debug=dict(default=False, type='bool'),
- state=dict(default='present', type='str'),
- ignore=dict(default=None, type='list'),
- ),
- #supports_check_mode=True
- )
-
- user = module.params.get('user', None)
- if not user:
- user = os.environ['ZABBIX_USER']
-
- passwd = module.params.get('password', None)
- if not passwd:
- passwd = os.environ['ZABBIX_PASSWORD']
-
-
-
- api_data = {
- 'user': user,
- 'password': passwd,
- 'server': module.params['server'],
- 'verbose': module.params['debug']
- }
-
- if not user or not passwd or not module.params['server']:
- module.fail_json(msg='Please specify the user, password, and the zabbix server.')
-
- zapi = ZabbixAPI(api_data)
-
- ignore = module.params['ignore']
- zbx_class = module.params.get('zbx_class')
- rpc_params = module.params.get('params', {})
- state = module.params.get('state')
-
-
- # Get the instance we are trying to call
- zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
-
- # perform get
- # Get the instance's method we are trying to call
-
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['get']
- _, content = zbx_action_method(zbx_class_inst, rpc_params)
-
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
- # If we are coming from a query, we need to pass in the correct rpc_params for delete.
- # specifically the zabbix class name + 'id'
- # if rpc_params is a list then we need to pass it. (list of ids to delete)
- idname = zbx_class.lower() + "id"
- if not isinstance(rpc_params, list) and content['result'][0].has_key(idname):
- rpc_params = [content['result'][0][idname]]
-
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['delete']
- _, content = zbx_action_method(zbx_class_inst, rpc_params)
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
- # It's not there, create it!
- if not exists(content):
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['create']
- _, content = zbx_action_method(zbx_class_inst, rpc_params)
- module.exit_json(changed=True, results=content['result'], state='present')
-
- # It's there and the same, do nothing!
- diff_params = diff_content(content['result'][0], rpc_params, ignore)
- if not diff_params:
- module.exit_json(changed=False, results=content['result'], state="present")
-
- # Add the id to update with
- idname = zbx_class.lower() + "id"
- diff_params[idname] = content['result'][0][idname]
-
-
- ## It's there and not the same, update it!
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['update']
- _, content = zbx_action_method(zbx_class_inst, diff_params)
- module.exit_json(changed=True, results=content, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required.
-from ansible.module_utils.basic import *
-
-main()
-
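Note on the removed module above: it builds its API wrapper classes (User, Host, ...) at import time with type() and setattr(), and every generated method delegates to a shared perform() call that issues the JSON-RPC 2.0 request. The following is a minimal, self-contained sketch of that pattern only; MiniAPI, its trimmed classes dict, and the stubbed perform() that merely returns the request body are illustrative names and are not part of this commit.

import copy

class MiniAPI(object):
    '''Sketch of the dynamic-class pattern used by the removed module.'''
    # Hypothetical subset of the real classes dict.
    classes = {
        'User': ['login', 'get'],
        'Host': ['get', 'create'],
    }

    def __init__(self):
        # Expose each generated class as a lowercase attribute,
        # e.g. self.user.login(...), mirroring the removed constructor.
        for cname, method_names in self.classes.items():
            setattr(self, cname.lower(), self.meta(cname, method_names)(self))

    def perform(self, method, rpc_params):
        # The real module POSTs this body with httplib2; here we just return it.
        return {'jsonrpc': '2.0',
                'method': method,
                'params': rpc_params.get('params', {}),
                'id': 1}

    @staticmethod
    def meta(cname, method_names):
        def __init__(self, parent):
            # Keep a reference to the parent so stubs can call perform().
            self.parent = parent

        _class = type(cname, (object,), {'__init__': __init__})

        def make_stub(method_name):
            def stub(self, params=None, **rpc_params):
                # Keyword arguments become the JSON-RPC "params" payload
                # unless an explicit params dict/list is given.
                rpc_params['params'] = params if params else copy.deepcopy(rpc_params)
                return self.parent.perform(cname.lower() + '.' + method_name, rpc_params)
            stub.__name__ = method_name
            return stub

        for method_name in method_names:
            setattr(_class, method_name, make_stub(method_name))
        return _class

# Usage:
#   api = MiniAPI()
#   api.user.login(user='admin', password='secret')
#   -> {'jsonrpc': '2.0', 'method': 'user.login',
#       'params': {'user': 'admin', 'password': 'secret'}, 'id': 1}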
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
new file mode 100644
index 000000000..fc4d44745
--- /dev/null
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -0,0 +1,5 @@
+---
+- name: Enable RHEL repositories
+  command: subscription-manager repos
+           --enable="rhel-7-server-rpms"
+           --enable="rhel-7-server-ose-3.0-rpms"
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
new file mode 100644
index 000000000..8fb2fc042
--- /dev/null
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+# TODO: Enhance redhat_subscription module
+# to make it able to attach to a pool
+# to make it able to enable repositories
+
+- set_fact:
+    rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
+    rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
+
+- fail:
+    msg: "This role is only supported for Red Hat hosts"
+  when: ansible_distribution != 'RedHat'
+
+- fail:
+    msg: Either the rhsub_user variable or the rhel_subscription_user oo_option is required for this role.
+  when: rhel_subscription_user is not defined
+
+- fail:
+    msg: Either the rhsub_pass variable or the rhel_subscription_pass oo_option is required for this role.
+  when: rhel_subscription_pass is not defined
+
+- name: RedHat subscriptions
+  redhat_subscription:
+    username: "{{ rhel_subscription_user }}"
+    password: "{{ rhel_subscription_pass }}"
+    autosubscribe: yes
+
+- include: enterprise.yml
+  when: deployment_type == 'enterprise'
diff --git a/roles/rhel_unsubscribe/tasks/main.yml b/roles/rhel_unsubscribe/tasks/main.yml
new file mode 100644
index 000000000..2aeb09d83
--- /dev/null
+++ b/roles/rhel_unsubscribe/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove RedHat subscriptions
+  redhat_subscription:
+    state: absent
+  when: ansible_distribution == "RedHat"