From 7b316631a2b988318b47d3a50a7b66e3ff3fdbd2 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Wed, 10 Jun 2015 10:31:39 -0400 Subject: Update for RC2 changes Remove openshift-deployer.kubeconfig from master template Sync config template Update enterprise image names Switch to node auto registration Add deployer to list of serviceAccountConfig.managedNames Move package installation before registering facts change default kubeconfig location Change system:openshift-client to system:openshift-master Rename node cert/key/kubeconfig per openshift/origin#3160 Update references to /var/lib/openshift/openshift.local.certificates --- playbooks/common/openshift-node/config.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 9e642f3d3..2d2560db4 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -27,10 +27,12 @@ stat: path: "{{ item }}" with_items: - - "/etc/openshift/node/node.key" - - "/etc/openshift/node/node.kubeconfig" + - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.crt" + - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.key" + - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.kubeconfig" - "/etc/openshift/node/ca.crt" - "/etc/openshift/node/server.key" + - "/etc/openshift/node/server.crt" register: stat_result - set_fact: certs_missing: "{{ stat_result.results | map(attribute='stat.exists') @@ -50,7 +52,7 @@ register: mktemp changed_when: False -- name: Register nodes +- name: Create node certificates hosts: oo_first_master vars: nodes_needing_certs: "{{ hostvars @@ -60,7 +62,7 @@ | oo_select_keys(groups['oo_nodes_to_config']) }}" sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" roles: - - openshift_register_nodes + - openshift_node_certificates post_tasks: - name: Create a tarball of the node config directories command: > -- cgit v1.2.3 From 15bcfb3e59e6e31c00e23725547f896c03c93290 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Fri, 19 Jun 2015 10:00:41 -0400 Subject: Add openshift_examples role This role installs db-templates, image-streams, and quickstart-templates into /usr/share/openshift/examples on the master and then uses `oc create` to import them. 
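To make that flow concrete, the sketch below shows roughly what such an import could look like as Ansible tasks. It is an illustration only, not the role's actual tasks/main.yml; the variable names are the ones defined in roles/openshift_examples/defaults/main.yml later in this patch, and the `openshift` namespace comes from the role's README.

```yaml
# Hypothetical sketch, not the actual roles/openshift_examples/tasks/main.yml.
# Variable names follow roles/openshift_examples/defaults/main.yml below.
- name: Copy the example files to the master
  copy:
    src: examples
    dest: /usr/share/openshift/

- name: Import CentOS image streams into the openshift namespace
  command: oc create -f {{ centos_image_streams }} -n openshift
  when: openshift_examples_load_centos | bool

- name: Import RHEL image streams into the openshift namespace
  command: oc create -f {{ rhel_image_streams }} -n openshift
  when: openshift_examples_load_rhel | bool
```

Re-running tasks like these would fail once the objects exist; the role's README below explains how non-zero exit codes are tolerated when stderr reports 'already exists'.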
--- playbooks/common/openshift-master/config.yml | 1 + roles/openshift_examples/README.md | 45 ++ roles/openshift_examples/defaults/main.yml | 16 + .../db-templates/mongodb-ephemeral-template.json | 179 ++++++ .../db-templates/mongodb-persistent-template.json | 201 +++++++ .../db-templates/mysql-ephemeral-template.json | 169 ++++++ .../db-templates/mysql-persistent-template.json | 191 ++++++ .../postgresql-ephemeral-template.json | 169 ++++++ .../postgresql-persistent-template.json | 191 ++++++ .../image-streams/image-streams-centos7.json | 256 ++++++++ .../image-streams/image-streams-rhel7.json | 226 ++++++++ .../quickstart-templates/cakephp-mysql.json | 364 ++++++++++++ .../examples/quickstart-templates/cakephp.json | 266 +++++++++ .../quickstart-templates/dancer-mysql.json | 334 +++++++++++ .../examples/quickstart-templates/dancer.json | 200 +++++++ .../quickstart-templates/django-postgresql.json | 341 +++++++++++ .../examples/quickstart-templates/django.json | 254 ++++++++ .../quickstart-templates/nodejs-mongodb.json | 329 +++++++++++ .../examples/quickstart-templates/nodejs.json | 236 ++++++++ .../quickstart-templates/rails-postgresql.json | 388 +++++++++++++ .../xpaas-streams/jboss-image-streams.json | 157 +++++ .../examples/xpaas-templates/amq6-persistent.json | 399 +++++++++++++ .../files/examples/xpaas-templates/amq6.json | 366 ++++++++++++ .../examples/xpaas-templates/eap-app-secret.json | 32 + .../xpaas-templates/eap6-amq-persistent-sti.json | 643 +++++++++++++++++++++ .../examples/xpaas-templates/eap6-amq-sti.json | 606 +++++++++++++++++++ .../examples/xpaas-templates/eap6-basic-sti.json | 405 +++++++++++++ .../eap6-mongodb-persistent-sti.json | 619 ++++++++++++++++++++ .../examples/xpaas-templates/eap6-mongodb-sti.json | 582 +++++++++++++++++++ .../xpaas-templates/eap6-mysql-persistent-sti.json | 625 ++++++++++++++++++++ .../examples/xpaas-templates/eap6-mysql-sti.json | 588 +++++++++++++++++++ .../eap6-postgresql-persistent-sti.json | 601 +++++++++++++++++++ .../xpaas-templates/eap6-postgresql-sti.json | 564 ++++++++++++++++++ .../examples/xpaas-templates/jws-app-secret.json | 33 ++ .../xpaas-templates/jws-tomcat7-basic-sti.json | 359 ++++++++++++ .../jws-tomcat7-mongodb-persistent-sti.json | 573 ++++++++++++++++++ .../xpaas-templates/jws-tomcat7-mongodb-sti.json | 536 +++++++++++++++++ .../jws-tomcat7-mysql-persistent-sti.json | 574 ++++++++++++++++++ .../xpaas-templates/jws-tomcat7-mysql-sti.json | 537 +++++++++++++++++ .../jws-tomcat7-postgresql-persistent-sti.json | 550 ++++++++++++++++++ .../jws-tomcat7-postgresql-sti.json | 513 ++++++++++++++++ .../xpaas-templates/jws-tomcat8-basic-sti.json | 359 ++++++++++++ .../jws-tomcat8-mongodb-persistent-sti.json | 573 ++++++++++++++++++ .../xpaas-templates/jws-tomcat8-mongodb-sti.json | 536 +++++++++++++++++ .../jws-tomcat8-mysql-persistent-sti.json | 574 ++++++++++++++++++ .../xpaas-templates/jws-tomcat8-mysql-sti.json | 537 +++++++++++++++++ .../jws-tomcat8-postgresql-persistent-sti.json | 550 ++++++++++++++++++ .../jws-tomcat8-postgresql-sti.json | 513 ++++++++++++++++ roles/openshift_examples/meta/main.yml | 15 + roles/openshift_examples/tasks/main.yml | 53 ++ roles/openshift_examples/templates.sh | 31 + 51 files changed, 17959 insertions(+) create mode 100644 roles/openshift_examples/README.md create mode 100644 roles/openshift_examples/defaults/main.yml create mode 100644 roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json create mode 100644 
roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json create mode 100644 roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json create mode 100644 roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json create mode 100644 roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json create mode 100644 roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json create mode 100644 roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json create mode 100644 roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/cakephp.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/dancer.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/django.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/nodejs.json create mode 100644 roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json create mode 100644 roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/amq6.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap-app-secret.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-app-secret.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json create mode 100644 
roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json create mode 100644 roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json create mode 100644 roles/openshift_examples/meta/main.yml create mode 100644 roles/openshift_examples/tasks/main.yml create mode 100755 roles/openshift_examples/templates.sh (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 052ed14c7..29c4d9c5c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -3,6 +3,7 @@ hosts: oo_masters_to_config roles: - openshift_master + - openshift_examples + - role: fluentd_master when: openshift.common.use_fluentd | bool tasks: diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md new file mode 100644 index 000000000..787624ab6 --- /dev/null +++ b/roles/openshift_examples/README.md @@ -0,0 +1,45 @@
+OpenShift Examples
+================
+
+Installs example image streams, db-templates, and quickstart-templates by copying
+examples from this role to your first master and importing them with `oc create -n openshift`
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+| Name                                  | Default value                                            | Description                                 |
+|---------------------------------------|----------------------------------------------------------|---------------------------------------------|
+| openshift_examples_load_centos        | true when openshift_deployment_type is not 'enterprise'  | Load CentOS image streams                   |
+| openshift_examples_load_rhel          | true if openshift_deployment_type is 'enterprise'        | Load RHEL image streams                     |
+| openshift_examples_load_db_templates  | true                                                     | Loads database templates                    |
+| openshift_examples_load_quickstarts   | true                                                     | Loads quickstarts, e.g. nodejs, rails, etc. |
+| openshift_examples_load_xpaas         | false                                                    | Loads xPaaS streams and templates           |
+
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+TODO
+----
+Currently we use `oc create -f` against various files and accept a non-zero return code as success
+if (and only if) stderr also contains the string 'already exists'. This means that if one object in the file already exists
+but others fail to create, you won't be aware of the failure. This also means that we do not currently support
+updating existing objects.
+
+We should add the ability to compare existing image streams against those we're being asked to load, and update them if necessary.
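As a rough illustration of the idiom described in the TODO above (a sketch only, not part of this patch: the task name and registered variable are hypothetical, while the `quickstarts_base` variable and the `openshift` namespace come from this role's defaults and README):

```yaml
# Sketch of the "accept 'already exists' as success" idiom described above.
- name: Import quickstart templates
  command: oc create -f {{ quickstarts_base }} -n openshift
  register: oc_create_quickstarts
  # Fail only when oc exits non-zero AND stderr does not report an existing object.
  failed_when: oc_create_quickstarts.rc != 0 and 'already exists' not in oc_create_quickstarts.stderr
  when: openshift_examples_load_quickstarts | bool
```

As the TODO notes, tolerating 'already exists' this way hides partial failures within a file and never updates objects that already exist.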
+ +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Scott Dodson (sdodson@redhat.com) diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml new file mode 100644 index 000000000..0f8e9f7ee --- /dev/null +++ b/roles/openshift_examples/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# By default install rhel and xpaas streams on enterprise installs +openshift_examples_load_centos: "{{ openshift_deployment_type != 'enterprise' }}" +openshift_examples_load_rhel: "{{ openshift_deployment_type == 'enterprise' }}" +openshift_examples_load_db_templates: true +openshift_examples_load_xpaas: false +openshift_examples_load_quickstarts: true + +examples_base: /usr/share/openshift/examples +image_streams_base: "{{ examples_base }}/image-streams" +centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json" +rhel_image_streams: "{{ image_streams_base}}/image-streams-rhel7.json" +db_templates_base: "{{ examples_base }}/db-templates" +xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json" +xpaas_templates_base: "{{ examples_base }}/xpaas-templates" +quickstarts_base: "{{ examples_base }}/quickstart-templates" diff --git a/roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json new file mode 100644 index 000000000..6252da2ec --- /dev/null +++ b/roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json @@ -0,0 +1,179 @@ +{ + "kind": "Template", + "apiVersion": "v1beta3", + "metadata": { + "name": "mongodb-ephemeral", + "creationTimestamp": null, + "annotations": { + "description": "MongoDB database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. 
Only use this template for testing", + "iconClass": "icon-mongodb", + "tags": "database,mongodb" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "mongo", + "protocol": "TCP", + "port": 27017, + "targetPort": 27017, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate", + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mongodb:latest", + "namespace": "openshift" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mongodb", + "image": "mongodb", + "ports": [ + { + "containerPort": 27017, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${MONGODB_USER}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${MONGODB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${MONGODB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${MONGODB_ADMIN_PASSWORD}" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mongodb/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mongodb" + }, + { + "name": "MONGODB_USER", + "description": "Username for MongoDB user that will be used for accessing the database", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "MONGODB_PASSWORD", + "description": "Password for the MongoDB user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "MONGODB_DATABASE", + "description": "Database name", + "value": "sampledb" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "description": "Password for the database admin user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + } + ], + "labels": { + "template": "mongodb-ephemeral-template" + } +} diff --git a/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json new file mode 100644 index 000000000..ff19a4834 --- /dev/null +++ b/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json @@ -0,0 +1,201 @@ +{ + "kind": "Template", + "apiVersion": "v1beta3", + "metadata": { + "name": "mongodb-persistent", + "creationTimestamp": null, + "annotations": { 
+ "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported", + "iconClass": "icon-mongodb", + "tags": "database,mongodb" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "mongo", + "protocol": "TCP", + "port": 27017, + "targetPort": 27017, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "spec": { + "accessModes": [ + "ReadWriteMany" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate", + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mongodb:latest", + "namespace": "openshift" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mongodb", + "image": "mongodb", + "ports": [ + { + "containerPort": 27017, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${MONGODB_USER}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${MONGODB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${MONGODB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${MONGODB_ADMIN_PASSWORD}" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mongodb/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "persistentVolumeClaim": { + "claimName": "mongodb" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mongodb" + }, + { + "name": "MONGODB_USER", + "description": "Username for MongoDB user that will be used for accessing the database", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "MONGODB_PASSWORD", + "description": "Password for the MongoDB user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "MONGODB_DATABASE", + "description": "Database name", + "value": "sampledb" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "description": "Password for the database admin user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "VOLUME_CAPACITY", + "description": "Volume space available for data, e.g. 
512Mi, 2Gi", + "value": "512Mi" + } + ], + "labels": { + "template": "mongodb-persistent-template" + } +} diff --git a/roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json new file mode 100644 index 000000000..697a4ad68 --- /dev/null +++ b/roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json @@ -0,0 +1,169 @@ +{ + "kind": "Template", + "apiVersion": "v1beta3", + "metadata": { + "name": "mysql-ephemeral", + "creationTimestamp": null, + "annotations": { + "description": "MySQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "iconClass": "icon-mysql-database", + "tags": "database,mysql" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "mysql", + "protocol": "TCP", + "port": 3306, + "targetPort": 3306, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate", + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mysql:latest", + "namespace": "openshift" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mysql", + "image": "mysql", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${MYSQL_USER}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${MYSQL_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${MYSQL_DATABASE}" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mysql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mysql" + }, + { + "name": "MYSQL_USER", + "description": "Username for MySQL user that will be used for accessing the database", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "MYSQL_PASSWORD", + "description": "Password for the MySQL user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "MYSQL_DATABASE", + "description": "Database name", + "value": "sampledb" + } + ], + "labels": { + "template": "mysql-ephemeral-template" + } +} diff --git 
a/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json new file mode 100644 index 000000000..90225e9c3 --- /dev/null +++ b/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json @@ -0,0 +1,191 @@ +{ + "kind": "Template", + "apiVersion": "v1beta3", + "metadata": { + "name": "mysql-persistent", + "creationTimestamp": null, + "annotations": { + "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported", + "iconClass": "icon-mysql-database", + "tags": "database,mysql" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "mysql", + "protocol": "TCP", + "port": 3306, + "targetPort": 3306, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "spec": { + "accessModes": [ + "ReadWriteMany" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate", + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mysql:latest", + "namespace": "openshift" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mysql", + "image": "mysql", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${MYSQL_USER}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${MYSQL_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${MYSQL_DATABASE}" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mysql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "persistentVolumeClaim": { + "claimName": "mysql" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mysql" + }, + { + "name": "MYSQL_USER", + "description": "Username for MySQL user that will be used for accessing the database", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "MYSQL_PASSWORD", + "description": "Password for the MySQL user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "MYSQL_DATABASE", + "description": "Database name", + "value": "sampledb" + 
}, + { + "name": "VOLUME_CAPACITY", + "description": "Volume space available for data, e.g. 512Mi, 2Gi", + "value": "512Mi" + } + ], + "labels": { + "template": "mysql-persistent-template" + } +} diff --git a/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json new file mode 100644 index 000000000..6922baa12 --- /dev/null +++ b/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json @@ -0,0 +1,169 @@ +{ + "kind": "Template", + "apiVersion": "v1beta3", + "metadata": { + "name": "postgresql-ephemeral", + "creationTimestamp": null, + "annotations": { + "description": "PostgreSQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "iconClass": "icon-postgresql", + "tags": "database,postgresql" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "protocol": "TCP", + "port": 5432, + "targetPort": 5432, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate", + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "postgresql:latest", + "namespace": "openshift" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "postgresql", + "image": "postgresql", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${POSTGRESQL_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${POSTGRESQL_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${POSTGRESQL_DATABASE}" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/pgsql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mysql" + }, + { + "name": "POSTGRESQL_USER", + "description": "Username for PostgreSQL user that will be used for accessing the database", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "description": "Password for the PostgreSQL user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": 
"POSTGRESQL_DATABASE", + "description": "Database name", + "value": "sampledb" + } + ], + "labels": { + "template": "postgresql-ephemeral-template" + } +} diff --git a/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json new file mode 100644 index 000000000..43162d8bb --- /dev/null +++ b/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json @@ -0,0 +1,191 @@ +{ + "kind": "Template", + "apiVersion": "v1beta3", + "metadata": { + "name": "postgresql-persistent", + "creationTimestamp": null, + "annotations": { + "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported", + "iconClass": "icon-postgresql", + "tags": "database,postgresql" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "protocol": "TCP", + "port": 5432, + "targetPort": 5432, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "spec": { + "accessModes": [ + "ReadWriteMany" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1beta3", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate", + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "postgresql:latest", + "namespace": "openshift" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "postgresql", + "image": "postgresql", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${POSTGRESQL_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${POSTGRESQL_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${POSTGRESQL_DATABASE}" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/pgsql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "persistentVolumeClaim": { + "claimName": "postgresql" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mysql" + }, + { + "name": "POSTGRESQL_USER", + "description": "Username for PostgreSQL user that will be used for accessing the database", + "generate": "expression", + 
"from": "user[A-Z0-9]{3}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "description": "Password for the PostgreSQL user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "POSTGRESQL_DATABASE", + "description": "Database name", + "value": "sampledb" + }, + { + "name": "VOLUME_CAPACITY", + "description": "Volume space available for data, e.g. 512Mi, 2Gi", + "value": "512Mi" + } + ], + "labels": { + "template": "postgresql-persistent-template" + } +} diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json new file mode 100644 index 000000000..712a43a11 --- /dev/null +++ b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json @@ -0,0 +1,256 @@ +{ + "kind": "ImageStreamList", + "apiVersion": "v1beta3", + "metadata": {}, + "items": [ + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "ruby", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/ruby-20-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "2.0", + "annotations": { + "description": "Build and run Ruby 2.0 applications", + "iconClass": "icon-ruby", + "tags": "builder,ruby", + "supports": "ruby:2.0,ruby", + "version": "2.0" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "nodejs", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/nodejs-010-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "0.10", + "annotations": { + "description": "Build and run NodeJS 0.10 applications", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "supports":"nodejs:0.10,nodejs:0.1,nodejs", + "version": "0.10" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "perl", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/perl-516-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "5.16", + "annotations": { + "description": "Build and run Perl 5.16 applications", + "iconClass": "icon-perl", + "tags": "builder,perl", + "supports":"perl:5.16,perl", + "version": "5.16" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "php", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/php-55-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "5.5", + "annotations": { + "description": "Build and run PHP 5.5 applications", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:5.5,php", + "version": "5.5" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "python", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/python-33-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "3.3", + "annotations": { + "description": "Build and run Python 3.3 applications", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.3,python", + "version": "3.3" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": 
"ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "wildfly", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/wildfly-8-centos", + "tags": [ + { + "name": "latest" + }, + { + "name": "8", + "annotations": { + "description": "Build and run Java applications on Wildfly 8", + "iconClass": "icon-wildfly", + "tags": "builder,wildfly,java", + "supports":"wildfly:8,jee,java", + "version": "8" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "mysql", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/mysql-55-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "5.5", + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "postgresql", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/postgresql-92-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "9.2", + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "mongodb", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "openshift/mongodb-24-centos7", + "tags": [ + { + "name": "latest" + }, + { + "name": "2.4", + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json new file mode 100644 index 000000000..a5d2e9d9f --- /dev/null +++ b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json @@ -0,0 +1,226 @@ +{ + "kind": "ImageStreamList", + "apiVersion": "v1beta3", + "metadata": {}, + "items": [ + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "ruby", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/ruby-20-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "2.0", + "annotations": { + "description": "Build and run Ruby 2.0 applications", + "iconClass": "icon-ruby", + "tags": "builder,ruby", + "supports": "ruby:2.0,ruby", + "version": "2.0" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "nodejs", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/nodejs-010-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "0.10", + "annotations": { + "description": "Build and run NodeJS 0.10 applications", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "supports":"nodejs:0.10,nodejs:0.1,nodejs", + "version": "0.10" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "perl", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/perl-516-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "5.16", + "annotations": { + "description": "Build and run Perl 5.16 applications", + "iconClass": "icon-perl", + "tags": "builder,perl", + "supports":"perl:5.16,perl", + "version": "5.16" + }, 
+ "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "php", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/php-55-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "5.5", + "annotations": { + "description": "Build and run PHP 5.5 applications", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:5.5,php", + "version": "5.5" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "python", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/python-33-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "3.3", + "annotations": { + "description": "Build and run Python 3.3 applications", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.3,python", + "version": "3.3" + }, + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "mysql", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/mysql-55-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "5.5", + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "postgresql", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/postgresql-92-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "9.2", + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1beta3", + "metadata": { + "name": "mongodb", + "creationTimestamp": null + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3/mongodb-24-rhel7", + "tags": [ + { + "name": "latest" + }, + { + "name": "2.4", + "from": { + "Kind": "ImageStreamTag", + "Name": "latest" + } + } + ] + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json new file mode 100644 index 000000000..e5699bce7 --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json @@ -0,0 +1,364 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-mysql-example", + "annotations": { + "description": "An example CakePHP application with a MySQL database", + "tags": "instant-app,php,cakephp,mysql", + "iconClass": "icon-php" + } + }, + "labels": { + "template": "cakephp-mysql-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "cakephp-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "cakephp-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + 
"metadata": { + "name": "cakephp-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "php:5.5" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "cakephp-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling", + "recreateParams": { + "pre": { + "failurePolicy": "Abort", + "execNewPod": { + "command": [ + "./migrate-database.sh" + ], + "containerName": "cakephp-example" + } + } + } + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "cakephp-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "cakephp-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "cakephp-frontend" + }, + "template": { + "metadata": { + "name": "cakephp-frontend", + "labels": { + "name": "cakephp-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "cakephp-example", + "image": "cakephp-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "DATABASE_ENGINE", + "value": "${DATABASE_ENGINE}" + }, + { + "name": "DATABASE_NAME", + "value": "${DATABASE_NAME}" + }, + { + "name": "DATABASE_USER", + "value": "${DATABASE_USER}" + }, + { + "name": "DATABASE_PASSWORD", + "value": "${DATABASE_PASSWORD}" + }, + { + "name": "CAKEPHP_SECRET_TOKEN", + "value": "${CAKEPHP_SECRET_TOKEN}" + }, + { + "name": "CAKEPHP_SECURITY_SALT", + "value": "${CAKEPHP_SECURITY_SALT}" + }, + { + "name": "CAKEPHP_SECURITY_CIPHER_SEED", + "value": "${CAKEPHP_SECURITY_CIPHER_SEED}" + } + ] + } + ] + } + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Exposes the database server" + } + }, + "spec": { + "ports": [ + { + "name": "mysql", + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Defines how to deploy the database" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mysql", + "image": "openshift/mysql-55-centos7", + "ports": [ + { + "containerPort": 3306 + } + ], + "env": [ 
+ { + "name": "MYSQL_USER", + "value": "${DATABASE_USER}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DATABASE_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DATABASE_NAME}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/cakephp-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the CakePHP service", + "value": "cakephp-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mysql" + }, + { + "name": "DATABASE_ENGINE", + "description": "Database engine: postgresql, mysql or sqlite (default)", + "value": "mysql" + }, + { + "name": "DATABASE_NAME", + "description": "Database name", + "value": "default" + }, + { + "name": "DATABASE_USER", + "description": "Database user name", + "value": "cakephp" + }, + { + "name": "DATABASE_PASSWORD", + "description": "Database user password", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "CAKEPHP_SECRET_TOKEN", + "description": "Set this to a long random string", + "generate": "expression", + "from": "[\\w]{50}" + }, + { + "name": "CAKEPHP_SECURITY_SALT", + "description": "Security salt for session hash", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "CAKEPHP_SECURITY_CIPHER_SEED", + "description": "Security cipher seed for session hash", + "generate": "expression", + "from": "[0-9]{30}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json new file mode 100644 index 000000000..09521add4 --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json @@ -0,0 +1,266 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-example", + "annotations": { + "description": "An example CakePHP application with no database", + "tags": "instant-app,php,cakephp", + "iconClass": "icon-php" + } + }, + "labels": { + "template": "cakephp-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "cakephp-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "cakephp-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + 
"metadata": { + "name": "cakephp-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "php:5.5" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "cakephp-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "cakephp-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "cakephp-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "cakephp-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "cakephp-frontend" + }, + "template": { + "metadata": { + "name": "cakephp-frontend", + "labels": { + "name": "cakephp-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "cakephp-example", + "image": "cakephp-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "DATABASE_ENGINE", + "value": "${DATABASE_ENGINE}" + }, + { + "name": "DATABASE_NAME", + "value": "${DATABASE_NAME}" + }, + { + "name": "DATABASE_USER", + "value": "${DATABASE_USER}" + }, + { + "name": "DATABASE_PASSWORD", + "value": "${DATABASE_PASSWORD}" + }, + { + "name": "CAKEPHP_SECRET_TOKEN", + "value": "${CAKEPHP_SECRET_TOKEN}" + }, + { + "name": "CAKEPHP_SECURITY_SALT", + "value": "${CAKEPHP_SECURITY_SALT}" + }, + { + "name": "CAKEPHP_SECURITY_CIPHER_SEED", + "value": "${CAKEPHP_SECURITY_CIPHER_SEED}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/cakephp-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the CakePHP service", + "value": "cakephp-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name" + }, + { + "name": "DATABASE_ENGINE", + "description": "Database engine: postgresql, mysql or sqlite (default)" + }, + { + "name": "DATABASE_NAME", + "description": "Database name" + }, + { + "name": "DATABASE_USER", + "description": "Database user name" + }, + { + "name": "DATABASE_PASSWORD", + "description": "Database user password" + }, + { + "name": "CAKEPHP_SECRET_TOKEN", + "description": "Set this to a long random string", 
+ "generate": "expression", + "from": "[\\w]{50}" + }, + { + "name": "CAKEPHP_SECURITY_SALT", + "description": "Security salt for session hash", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "CAKEPHP_SECURITY_CIPHER_SEED", + "description": "Security cipher seed for session hash", + "generate": "expression", + "from": "[0-9]{30}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json new file mode 100644 index 000000000..fc92a1d6c --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json @@ -0,0 +1,334 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "dancer-mysql-example", + "annotations": { + "description": "An example Dancer application with a MySQL database", + "tags": "instant-app,perl,dancer,mysql", + "iconClass": "icon-perl" + } + }, + "labels": { + "template": "dancer-mysql-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "dancer-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "dancer-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "dancer-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "dancer-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "dancer-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "dancer-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "perl:5.16" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "dancer-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "dancer-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "dancer-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "dancer-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "dancer-frontend" + }, + "template": { + "metadata": { + "name": "dancer-frontend", + "labels": { + "name": "dancer-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "dancer-example", + "image": "dancer-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "MYSQL_USER", + "value": "${MYSQL_USER}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${MYSQL_PASSWORD}" + }, + { + 
"name": "MYSQL_DATABASE", + "value": "${MYSQL_DATABASE}" + }, + { + "name": "SECRET_KEY_BASE", + "value": "${SECRET_KEY_BASE}" + } + ] + } + ] + } + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Exposes the database server" + } + }, + "spec": { + "ports": [ + { + "name": "mysql", + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Defines how to deploy the database" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mysql", + "image": "openshift/mysql-55-centos7", + "ports": [ + { + "containerPort": 3306 + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${MYSQL_USER}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${MYSQL_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${MYSQL_DATABASE}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/dancer-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Dancer service", + "value": "dancer-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "ADMIN_USERNAME", + "description": "administrator username", + "generate": "expression", + "from": "admin[A-Z0-9]{3}" + }, + { + "name": "ADMIN_PASSWORD", + "description": "administrator password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "database" + }, + { + "name": "MYSQL_USER", + "description": "database username", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "MYSQL_PASSWORD", + "description": "database password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "MYSQL_DATABASE", + "description": "database name", + "value": "sampledb" + }, + { + "name": "SECRET_KEY_BASE", + "description": "Your secret key for verifying the integrity of signed cookies", + "generate": "expression", + "from": "[a-z0-9]{127}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json new file mode 100644 index 000000000..829f50bae --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json @@ -0,0 +1,200 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "dancer-example", + 
"annotations": { + "description": "An example Dancer application with no database", + "tags": "instant-app,perl,dancer", + "iconClass": "icon-perl" + } + }, + "labels": { + "template": "dancer-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "dancer-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "dancer-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "dancer-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "dancer-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "dancer-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "dancer-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "perl:5.16" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "dancer-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "dancer-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "dancer-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "dancer-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "dancer-frontend" + }, + "template": { + "metadata": { + "name": "dancer-frontend", + "labels": { + "name": "dancer-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "dancer-example", + "image": "dancer-example", + "ports": [ + { + "containerPort": 8080 + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/dancer-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Dancer service", + "value": "dancer-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "SECRET_KEY_BASE", + "description": "Your secret key for verifying the integrity of signed cookies", + "generate": 
"expression", + "from": "[a-z0-9]{127}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json new file mode 100644 index 000000000..c46476e8a --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json @@ -0,0 +1,341 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "django-postgresql-example", + "annotations": { + "description": "An example Django application with a PostgreSQL database", + "tags": "instant-app,python,django,postgresql", + "iconClass": "icon-python" + } + }, + "labels": { + "template": "django-postgresql-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "django-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "django-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "django-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "django-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "django-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "django-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "python:3.3" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "django-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "django-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "django-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "django-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "django-frontend" + }, + "template": { + "metadata": { + "name": "django-frontend", + "labels": { + "name": "django-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "django-example", + "image": "django-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "DATABASE_ENGINE", + "value": "${DATABASE_ENGINE}" + }, + { + "name": "DATABASE_NAME", + "value": "${DATABASE_NAME}" + }, + { + "name": "DATABASE_USER", + "value": "${DATABASE_USER}" + }, + { + "name": "DATABASE_PASSWORD", + "value": "${DATABASE_PASSWORD}" + }, + { + "name": "APP_CONFIG", + "value": "${APP_CONFIG}" + }, + { + "name": "DJANGO_SECRET_KEY", + 
"value": "${DJANGO_SECRET_KEY}" + } + ] + } + ] + } + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Exposes the database server" + } + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Defines how to deploy the database" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "postgresql", + "image": "openshift/postgresql-92-centos7", + "ports": [ + { + "containerPort": 5432 + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DATABASE_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DATABASE_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DATABASE_NAME}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/django-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Django service", + "value": "django-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "postgresql" + }, + { + "name": "DATABASE_ENGINE", + "description": "Database engine: postgresql, mysql or sqlite (default)", + "value": "postgresql" + }, + { + "name": "DATABASE_NAME", + "description": "Database name", + "value": "default" + }, + { + "name": "DATABASE_USER", + "description": "Database user name", + "value": "django" + }, + { + "name": "DATABASE_PASSWORD", + "description": "Database user password", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "APP_CONFIG", + "description": "Relative path to Gunicorn configuration file (optional)" + }, + { + "name": "DJANGO_SECRET_KEY", + "description": "Set this to a long random string", + "generate": "expression", + "from": "[\\w]{50}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/django.json b/roles/openshift_examples/files/examples/quickstart-templates/django.json new file mode 100644 index 000000000..74bbea163 --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/django.json @@ -0,0 +1,254 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "django-example", + "annotations": { + "description": "An example Django application with no database", + "tags": "instant-app,python,django", + "iconClass": 
"icon-python" + } + }, + "labels": { + "template": "django-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "django-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "django-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "django-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "django-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "django-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "django-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "python:3.3" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "django-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "django-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "django-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "django-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "django-frontend" + }, + "template": { + "metadata": { + "name": "django-frontend", + "labels": { + "name": "django-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "django-example", + "image": "django-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "DATABASE_ENGINE", + "value": "${DATABASE_ENGINE}" + }, + { + "name": "DATABASE_NAME", + "value": "${DATABASE_NAME}" + }, + { + "name": "DATABASE_USER", + "value": "${DATABASE_USER}" + }, + { + "name": "DATABASE_PASSWORD", + "value": "${DATABASE_PASSWORD}" + }, + { + "name": "APP_CONFIG", + "value": "${APP_CONFIG}" + }, + { + "name": "DJANGO_SECRET_KEY", + "value": "${DJANGO_SECRET_KEY}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/django-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Django 
service", + "value": "django-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name" + }, + { + "name": "DATABASE_ENGINE", + "description": "Database engine: postgresql, mysql or sqlite (default)" + }, + { + "name": "DATABASE_NAME", + "description": "Database name" + }, + { + "name": "DATABASE_USER", + "description": "Database user name" + }, + { + "name": "DATABASE_PASSWORD", + "description": "Database user password" + }, + { + "name": "APP_CONFIG", + "description": "Relative path to Gunicorn configuration file (optional)" + }, + { + "name": "DJANGO_SECRET_KEY", + "description": "Set this to a long random string", + "generate": "expression", + "from": "[\\w]{50}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json new file mode 100644 index 000000000..cd9e5faf0 --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json @@ -0,0 +1,329 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-mongodb-example", + "annotations": { + "description": "An example Node.js application with a MongoDB database", + "tags": "instant-app,nodejs,mongodb", + "iconClass": "icon-nodejs" + } + }, + "labels": { + "template": "nodejs-mongodb-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "nodejs-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "nodejs-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "nodejs:0.10" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "nodejs-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "nodejs-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "nodejs-example:latest" + } 
+ } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "nodejs-frontend" + }, + "template": { + "metadata": { + "name": "nodejs-frontend", + "labels": { + "name": "nodejs-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "nodejs-example", + "image": "nodejs-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "MONGODB_USER", + "value": "${MONGODB_USER}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${MONGODB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${MONGODB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${MONGODB_ADMIN_PASSWORD}" + } + ] + } + ] + } + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Exposes the database server" + } + }, + "spec": { + "ports": [ + { + "name": "mongodb", + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Defines how to deploy the database" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mongodb", + "image": "openshift/mongodb-24-centos7", + "ports": [ + { + "containerPort": 27017 + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${MONGODB_USER}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${MONGODB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${MONGODB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${MONGODB_ADMIN_PASSWORD}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/nodejs-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Node.js service", + "value": "nodejs-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "mongodb" + }, + { + "name": "MONGODB_USER", + "description": "Username for MongoDB user that will be used for accessing the database", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "MONGODB_PASSWORD", + "description": "Password for the MongoDB user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + }, + { + "name": "MONGODB_DATABASE", + "description": "Database name", + "value": "sampledb" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "description": "Password for 
the database admin user", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json new file mode 100644 index 000000000..ff7dd574e --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json @@ -0,0 +1,236 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-example", + "annotations": { + "description": "An example Node.js application with no database", + "tags": "instant-app,nodejs", + "iconClass": "icon-nodejs" + } + }, + "labels": { + "template": "nodejs-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "nodejs-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "nodejs-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "nodejs:0.10" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "nodejs-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "nodejs-frontend", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "nodejs-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "nodejs-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "nodejs-frontend" + }, + "template": { + "metadata": { + "name": "nodejs-frontend", + "labels": { + "name": "nodejs-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "nodejs-example", + "image": "nodejs-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "MONGODB_USER", + "value": "${MONGODB_USER}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${MONGODB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${MONGODB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${MONGODB_ADMIN_PASSWORD}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the 
repository with your application source code", + "value": "https://github.com/openshift/nodejs-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Node.js service", + "value": "nodejs-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name" + }, + { + "name": "MONGODB_USER", + "description": "Username for MongoDB user that will be used for accessing the database" + }, + { + "name": "MONGODB_PASSWORD", + "description": "Password for the MongoDB user" + }, + { + "name": "MONGODB_DATABASE", + "description": "Database name" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "description": "Password for the database admin user" + } + ] +} diff --git a/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json new file mode 100644 index 000000000..ec7da77e3 --- /dev/null +++ b/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json @@ -0,0 +1,388 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "rails-postgresql-example", + "annotations": { + "description": "An example Rails application with a PostgreSQL database", + "tags": "instant-app,ruby,rails,postgresql", + "iconClass": "icon-ruby" + } + }, + "labels": { + "template": "rails-postgresql-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "rails-frontend", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "rails-frontend" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "rails-route" + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "rails-frontend" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "rails-example", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "rails-example", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "openshift", + "name": "ruby:2.0" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "rails-example:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "rails-frontend", + "annotations": { + 
"description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Recreate", + "recreateParams": { + "pre": { + "failurePolicy": "Abort", + "execNewPod": { + "command": [ + "./migrate-database.sh" + ], + "containerName": "rails-example" + } + } + } + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "rails-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "rails-example:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "rails-frontend" + }, + "template": { + "metadata": { + "name": "rails-frontend", + "labels": { + "name": "rails-frontend" + } + }, + "spec": { + "containers": [ + { + "name": "rails-example", + "image": "rails-example", + "ports": [ + { + "containerPort": 8080 + } + ], + "env": [ + { + "name": "DATABASE_SERVICE_NAME", + "value": "${DATABASE_SERVICE_NAME}" + }, + { + "name": "POSTGRESQL_USER", + "value": "${POSTGRESQL_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${POSTGRESQL_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${POSTGRESQL_DATABASE}" + }, + { + "name": "SECRET_KEY_BASE", + "value": "${SECRET_KEY_BASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + }, + { + "name": "SECRET_KEY_BASE", + "value": "${SECRET_KEY_BASE}" + }, + { + "name": "APPLICATION_DOMAIN", + "value": "${APPLICATION_DOMAIN}" + }, + { + "name": "APPLICATION_USER", + "value": "${APPLICATION_USER}" + }, + { + "name": "APPLICATION_PASSWORD", + "value": "${APPLICATION_PASSWORD}" + } + ] + } + ] + } + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Exposes the database server" + } + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "description": "Defines how to deploy the database" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "postgresql", + "image": "openshift/postgresql-92-centos7", + "ports": [ + { + "containerPort": 5432 + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${POSTGRESQL_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${POSTGRESQL_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${POSTGRESQL_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "SOURCE_REPOSITORY_URL", + "description": "The URL of the repository with your application source code", + "value": "https://github.com/openshift/rails-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "description": "Set this to a branch name, tag or other ref of your repository if you are not 
using the default branch" + }, + { + "name": "CONTEXT_DIR", + "description": "Set this to the relative path to your project if it is not in the root of your repository" + }, + { + "name": "APPLICATION_DOMAIN", + "description": "The exposed hostname that will route to the Rails service", + "value": "rails-example.openshiftapps.com" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "description": "A secret string used to configure the GitHub webhook", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "SECRET_KEY_BASE", + "description": "Your secret key for verifying the integrity of signed cookies", + "generate": "expression", + "from": "[a-z0-9]{127}" + }, + { + "name": "APPLICATION_USER", + "description": "The application user that is used within the sample application to authorize access on pages", + "value": "openshift" + }, + { + "name": "APPLICATION_PASSWORD", + "description": "The application password that is used within the sample application to authorize access on pages", + "value": "secret" + }, + { + "name": "DATABASE_SERVICE_NAME", + "description": "Database service name", + "value": "postgresql" + }, + { + "name": "POSTGRESQL_USER", + "description": "database username", + "generate": "expression", + "from": "user[A-Z0-9]{3}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "description": "database password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "POSTGRESQL_DATABASE", + "description": "database name", + "value": "root" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "description": "database max connections", + "value": "10" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "description": "database shared buffers", + "value": "12MB" + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json new file mode 100644 index 000000000..425cc3e0f --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json @@ -0,0 +1,157 @@ +{ + "kind": "List", + "apiVersion": "v1", + "metadata": { + "name": "jboss-image-streams", + "annotations": { + "description": "ImageStream definitions for JBoss Middleware products." 
+ } + }, + "items": [ + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-webserver3-tomcat7-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/tomcat7-openshift", + "tags": [ + { + "name": "3.0", + "annotations": { + "description": "JBoss Web Server v3 Tomcat 7 STI images.", + "iconClass": "icon-jboss", + "tags": "java", + "supports":"tomcat7:3.0,java", + "version": "3.0" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-webserver3-tomcat8-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/tomcat8-openshift", + "tags": [ + { + "name": "3.0", + "annotations": { + "description": "JBoss Web Server v3 Tomcat 8 STI images.", + "iconClass": "icon-jboss", + "tags": "java", + "supports":"tomcat8:3.0,java", + "version": "3.0" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-eap6-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap-openshift", + "tags": [ + { + "name": "6.4", + "annotations": { + "description": "JBoss EAP 6 STI images.", + "iconClass": "icon-jboss", + "tags": "javaee", + "supports":"eap:6.4,jee,java", + "version": "6.4" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-amq-6" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq-openshift:6.2", + "tags": [ + { + "name": "6.2", + "annotations": { + "description": "JBoss ActiveMQ 6 broker image.", + "iconClass": "icon-jboss", + "tags": "javaee", + "supports":"amq:6.2,jee,java", + "version": "6.2" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-mysql-55", + "annotations": { + "description": "Provides MySQL 5.5 images for use with JBoss Middleware products." + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7", + "tags": [ + { + "name": "latest", + "dockerImageReference": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest" + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-postgresql-92", + "annotations": { + "description": "Provides PostgreSQL 9.2 images for use with JBoss Middleware products." + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7", + "tags": [ + { + "name": "latest", + "dockerImageReference": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest" + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-mongodb-24", + "annotations": { + "description": "Provides MongoDB 2.4 images for use with JBoss Middleware products." 
+ } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7", + "tags": [ + { + "name": "latest", + "dockerImageReference": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest" + } + ] + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json b/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json new file mode 100644 index 000000000..00b63ce8c --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json @@ -0,0 +1,399 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for ActiveMQ brokers using persistent storage." + }, + "name": "amq6-persistent" + }, + "labels": { + "template": "amq6-persistent" + }, + "parameters": [ + { + "description": "ActiveMQ Release version, e.g. 6.2, etc.", + "name": "AMQ_RELEASE", + "value": "6.2" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "broker" + }, + { + "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "name": "MQ_PROTOCOL", + "value": "openwire" + }, + { + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "Broker user name", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Broker user password", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin User", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin Password", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5672, + "targetPort": 5672 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-amqp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's amqp port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5671, + "targetPort": 5671 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-amqp-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's amqp ssl port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 1883, + "targetPort": 1883 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-mqtt", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's mqtt port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61613, + "targetPort": 61613 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-stomp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's stomp port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61612, + "targetPort": 61612 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-stomp-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's stomp ssl port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's tcp (openwire) port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61617, + "targetPort": 61617 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's tcp ssl (openwire) port." + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-amq-6:${AMQ_RELEASE}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "registry.access.redhat.com/jboss-amq-6/amq-openshift:${AMQ_RELEASE}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-amq-amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp", + "containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "volumeMounts": [ + 
{ + "mountPath": "/opt/amq/data/kahadb", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_PROTOCOLS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-amq-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json b/roles/openshift_examples/files/examples/xpaas-templates/amq6.json new file mode 100644 index 000000000..0bb1b0651 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/amq6.json @@ -0,0 +1,366 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for ActiveMQ brokers." + }, + "name": "amq6" + }, + "labels": { + "template": "amq6" + }, + "parameters": [ + { + "description": "ActiveMQ Release version, e.g. 6.2, etc.", + "name": "AMQ_RELEASE", + "value": "6.2" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "broker" + }, + { + "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "name": "MQ_PROTOCOL", + "value": "openwire" + }, + { + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "" + }, + { + "description": "Broker user name", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Broker user password", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin User", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin Password", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5672, + "targetPort": 5672 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-amqp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's amqp port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5671, + "targetPort": 5671 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-amqp-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's amqp ssl port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 1883, + "targetPort": 1883 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-mqtt", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's mqtt port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61613, + "targetPort": 61613 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-stomp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's stomp port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61612, + "targetPort": 61612 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-stomp-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's stomp ssl port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's tcp (openwire) port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61617, + "targetPort": 61617 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's tcp ssl (openwire) port." 
+ } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-amq-6:${AMQ_RELEASE}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "registry.access.redhat.com/jboss-amq-6/amq-openshift:${AMQ_RELEASE}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-amq-amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp", + "containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_PROTOCOLS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap-app-secret.json b/roles/openshift_examples/files/examples/xpaas-templates/eap-app-secret.json new file mode 100644 index 000000000..cfe038048 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap-app-secret.json @@ -0,0 +1,32 @@ +{ + "kind": "List", + "apiVersion": "v1", + "metadata": {}, + "items": [ + { + "kind": "ServiceAccount", + "apiVersion": "v1", + "metadata": { + "name": "eap-service-account" + }, + "secrets": [ + { + "name": "eap-app-secret" + } + ] + }, + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Default secret file with name 'jboss' and password 'mykeystorepass'" + }, + "name": "eap-app-secret" + }, + "data": { + "keystore.jks": 
"/u3+7QAAAAIAAAABAAAAAQAFamJvc3MAAAFNbVtLLAAABQMwggT/MA4GCisGAQQBKgIRAQEFAASCBOsxl4wqa+E+XP8+qMZY9XLhvKrRX8V1MHdwFZQaLTEVURCizqYXoMnbhtfV0oMAUFsE7013TTA9Q2l+pSs+cqz6HH/vwjEEIkqJx5wD8WcD/bu9e9F9EHQ+zrjZFmpMFvXsvj9+ux1o/YLBDGY3kd4MoDcJy0yJ/ZpzNYLkXanlrMhWqxC7MAliCBsdyVgNn5RFb4Nn+JZgJuNSIGo/K292+0IFaFv9vsXbX889W9HPCvfO0mQIzoy8In0NhzdKli/67y4kbDkWaI0fRONckZTxNpxn6rMc0nN9zKrGVToLxj1Ufcoj/tCvR8agtPpv7KIWUqBYDg83ad+i4EE5XYISovlsl6RmtrrTb39PJcL86+wJ+x2ZrLuyzh6C9sAOdSBiKt/DY97ICIYltRMrb+cNwWdnJvT+PeYvv3vKo7YThha+akoJDjsWMp1HWpbIC9zg9ZjugU+/ao6nHtmoZmCaYjLuEE+sYl5s179uyQjE3LRc+0cVY2+bYCOD6P6JLH9GdfjkR40OhjryiWy2Md6vAGaATh6kjjreRHfSie4KCgIZx9Ngb1+uAwauYSM8d9OIwT5lRmLd4Go9CaFXtFdq/IZv3x5ZEPVqMjxcq0KXcs1QcfK3oSYL/rrkxXxKFTrd0N3KgvwATWx/KS90tdHBg65dF3PpBjK1AYQL3Q7KV3t45SVyYHd92TUsaduY1nUQk4TukNC8l9f8xYVeOFXoFHZRx9edqn8fjDMmCYn5PTPNuMPHQm7nKxeWhV2URY5jt774gmvHLNcXeEgrM7US81wOvs2y1jY/paJWn+OACf2x2a75MWFFkZH67bZoh9pPWAwOUEtegXTL5QVicHjzZrop8Qb7K7hlGgD0RP5YYOFYF4DD+SL5BHKr6fw/LS6MMJaK1wKsJd0oGg9HcHXjph9Kb+mqXrQ54C1KI42LpFftU3DCg8wGoqvg/zO/UtVeHX3rBZDUIkeQrCULEkki9oL5diDxe9mNx9Qua5FJ6FJGIffQmsC4b0+Xys6NyqUu1aeWLcAPA/5hcs6ZTiSRTHTBe3vxapyBjnAL5uij4ILbWbEGH1e0mAHBeiihRx+w4oxH4OGCvXOhwIDHETLJJUcnJe1CouECdqdfVy/eEsIfiEheVs8OwogJLiWgzB7PoebXM4SKsAWL3NcDtC1LV3KuPgFuTDH7MjPIR83eSxkKlJLMNGfEpUHyg+lm7aJ98PVIS+l1YV9oUzLfbo3S6S2sMjVgyviS90vNIPo5JOTEFHsg5aWJNHL0OV4zRUeILzwwdQz+VkTk9DobnkLWUeLnwUNWheOpaQh79Mk0IfwfLj4D0Vx9p+PShKKZCGs0wjckmCFBM5Pc1x2lwMdaP5yATzrw+jUc+/3UY4PF/4Ya66m/DRsBKEcXjVAHcTce6OdNdGlBNT8VgkxPiylwO8hvyvpf6j+wdb9iXi6eOnk0AiEJ6mUAXs/eyDD/cqQjnUBKRGLQUSdHhvtpw8RfvyVhAAxNOnBsOT0WYol9iK6pSclGTF5mZleASRzZhH69GgdebfFhXimb0j/wYj3uLgf6mrKMDwlrXJ80SiWkXxd5TX/7XtB9lbPzNpaR12M8U8UVg16VOtMwCR2Gss2vmhqQnQFLsUsAKcYM0TRp1pWqbzpGebCvJkVWiIYocN3ZI1csAhGX3G86ewAAAAEABVguNTA5AAADeTCCA3UwggJdoAMCAQICBGekovEwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk5DMRAwDgYDVQQHEwdSYWxlaWdoMRYwFAYDVQQKEw1teWNvbXBhbnkuY29tMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEPMA0GA1UEAxMGanNtaXRoMB4XDTE1MDUxOTE4MDYxOFoXDTE1MDgxNzE4MDYxOFowazELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk5DMRAwDgYDVQQHEwdSYWxlaWdoMRYwFAYDVQQKEw1teWNvbXBhbnkuY29tMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEPMA0GA1UEAxMGanNtaXRoMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0zbGtem+If//jw0OTszIcpX4ydOCC0PeqktulYkm4pG0qEVBB+HuMj7yeTBc1KCDl2xm+Q6LPeTzUufk7BXFEg4Ru1l3PSW70LyJBfHy5ns0dYE5M1I0Avv9rvjgC1VTsiBmdXh+tIIQDPknIKpWpcs79XPOURGLvuGjfyj08EZWFvAZzYrk3lKwkceDHpYYb5i+zxFRz5K6of/h9gQ9CzslqNd7uxxvyy/yTtNFk2J797Vk3hKtbiATqc9+egEHcEQrzADejPYol5ke3DA1NPRBqFGku5n215i2eYzYvVV1xmifID/3lzvNWN0bWlOxl74VsPnWa/2JPP3hZ6p5QIDAQABoyEwHzAdBgNVHQ4EFgQURLJKk/gaSrMjDyX8iYtCzPtTBqAwDQYJKoZIhvcNAQELBQADggEBAA4ESTKsWevv40hFv11t+lGNHT16u8Xk+WnvB4Ko5sZjVhvRWTTKOEBE5bDYfMhf0esn8gg0B4Qtm4Rb5t9PeaG/0d6xxD0BIV6eWihJVtEGOH47Wf/UzfC88fqoIxZ6MMBPik/WeafvOK+HIHfZSwAmqlXgl4nNVDdMNHtBhNAvikL3osxrSbqdi3eyI7rqSpb41Lm9v+PF+vZTOGRQf22Gq30/Ie85DlqugtRKimWHJYL2HeL4ywTtQKgde6JDRCOHwbDcsl6CbMjugt3yyI7Yo9EJdKb5p6YoVOpnCz7369W9Uim+Xrl2ELZWM5WTiQFxd6S36Ql2TUk+s8zj/GoN9ov0Y/yNNCxAibwyzo94N+Q4vA==" + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json new file mode 100644 index 000000000..2a9c06bee --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json @@ -0,0 +1,643 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 A-MQ applications with persistent storage built using 
STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-amq-persistent-sti" + }, + "labels": { + "template": "eap6-amq-persistent-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "ActiveMQ Release version, e.g. 6.2, etc.", + "name": "AMQ_RELEASE", + "value": "6.2" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", + "name": "MQ_JNDI", + "value": "java:/ConnectionFactory" + }, + { + "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "name": "MQ_PROTOCOL", + "value": "openwire" + }, + { + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "Broker user name", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Broker user password", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin User", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin Password", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's tcp (openwire) port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MQ_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-amq=MQ" + }, + { + "name": "MQ_JNDI", + "value": "${MQ_JNDI}" + }, + { + "name": "MQ_USERNAME", + "value": "${MQ_USERNAME}" + }, + { + "name": "MQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "MQ_PROTOCOL", + "value": "tcp" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + 
"secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-amq-6:${AMQ_RELEASE}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "registry.access.redhat.com/jboss-amq-6/amq-openshift:${AMQ_RELEASE}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-amq-amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp", + "containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/opt/amq/data/kahadb", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_PROTOCOLS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-amq-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json new file mode 100644 index 000000000..e96eef6f7 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json @@ -0,0 +1,606 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 A-MQ applications built 
using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-amq-sti" + }, + "labels": { + "template": "eap6-amq-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "ActiveMQ Release version, e.g. 6.2, etc.", + "name": "AMQ_RELEASE", + "value": "6.2" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", + "name": "MQ_JNDI", + "value": "java:/ConnectionFactory" + }, + { + "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "name": "MQ_PROTOCOL", + "value": "openwire" + }, + { + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "Broker user name", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Broker user password", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin User", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "ActiveMQ Admin Password", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's tcp (openwire) port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MQ_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-amq=MQ" + }, + { + "name": "MQ_JNDI", + "value": "${MQ_JNDI}" + }, + { + "name": "MQ_USERNAME", + "value": "${MQ_USERNAME}" + }, + { + "name": "MQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "MQ_PROTOCOL", + "value": "tcp" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + 
"secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-amq-6:${AMQ_RELEASE}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "registry.access.redhat.com/jboss-amq-6/amq-openshift:${AMQ_RELEASE}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-amq-amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp", + "containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-amq-tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_PROTOCOLS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json new file mode 100644 index 000000000..7148d8fd7 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json @@ -0,0 +1,405 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-jboss", + "description": "Application template for EAP 6 applications built using STI." + }, + "name": "eap6-basic-sti" + }, + "labels": { + "template": "eap6-basic-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 
6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": 
"HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json new file mode 100644 index 000000000..03cfbb11e --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json @@ -0,0 +1,619 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 MongDB applications with persistent storage built using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-mongodb-persistent-sti" + }, + "labels": { + "template": "eap6-mongodb-persistent-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC" + }, + { + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES" + }, + { + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": 
"[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": 
"HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mongodb-24:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mongodb-tcp-27017", + "containerPort": 27017, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mongodb-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mongodb-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json new file mode 100644 index 000000000..39f5a5a62 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json @@ -0,0 +1,582 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 MongDB applications built using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-mongodb-sti" + }, + "labels": { + "template": "eap6-mongodb-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 
6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC" + }, + { + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES" + }, + { + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + 
"name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mongodb-24:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mongodb-tcp-27017", + "containerPort": 27017, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json 
b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json new file mode 100644 index 000000000..0fa4421c6 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json @@ -0,0 +1,625 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 MySQL applications with persistent storage built using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-mysql-persistent-sti" + }, + "labels": { + "template": "eap6-mysql-persistent-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS" + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN" + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN" + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build 
trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + 
"name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mysql-55:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mysql-tcp-3306", + "containerPort": 3306, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mysql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json new file mode 100644 index 000000000..981e16cef --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json @@ -0,0 +1,588 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 MySQL applications built using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-mysql-sti" + }, + "labels": { + "template": "eap6-mysql-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 
6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS" + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN" + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN" + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + 
"name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mysql-55:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mysql-tcp-3306", + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json new file mode 100644 index 000000000..409ba5165 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json @@ -0,0 +1,601 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-postgresql-persistent-sti" + }, + "labels": { + "template": "eap6-postgresql-persistent-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS" + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": 
"TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-postgresql-92:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-postgresql-tcp-5432", + "containerPort": 5432, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git 
a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json new file mode 100644 index 000000000..c2ca18f9d --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json @@ -0,0 +1,564 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 6 PostgreSQL applications built using STI.", + "iconClass" : "icon-jboss" + }, + "name": "eap6-postgresql-sti" + }, + "labels": { + "template": "eap6-postgresql-sti" + }, + "parameters": [ + { + "description": "EAP Release version, e.g. 6.4, etc.", + "name": "EAP_RELEASE", + "value": "6.4" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "eap-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "" + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "" + }, + { + "description": "The name of the secret containing the keystore file", + "name": "EAP_HTTPS_SECRET", + "value": "eap-app-secret" + }, + { + "description": "The name of the keystore file within the secret", + "name": "EAP_HTTPS_KEYSTORE", + "value": "keystore.jks" + }, + { + "description": "The name associated with the server certificate", + "name": "EAP_HTTPS_NAME", + "value": "" + }, + { + "description": "The password for the keystore and certificate", + "name": "EAP_HTTPS_PASSWORD", + "value": "" + }, + { + "description": "The maximum number of client connections allowed. 
This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS" + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS" + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8888, + "targetPort": 8888 + } + ], + "portalIP": "None", + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-ping", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Ping service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-eap6-openshift:${EAP_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "eap-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-ping-8888", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", + "value": "${APPLICATION_NAME}-ping" + }, + { + "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", + "value": "8888" + }, + { + "name": "EAP_HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "EAP_HTTPS_KEYSTORE", + "value": "${EAP_HTTPS_KEYSTORE}" + }, + { + "name": "EAP_HTTPS_NAME", + "value": "${EAP_HTTPS_NAME}" + }, + { + "name": "EAP_HTTPS_PASSWORD", + "value": "${EAP_HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, 
+ { + "name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${EAP_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-postgresql-92:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-postgresql-tcp-5432", + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-app-secret.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-app-secret.json new file mode 100644 index 000000000..c24e4ed8b --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-app-secret.json @@ -0,0 +1,33 @@ +{ + "kind": "List", + "apiVersion": "v1", + "metadata": {}, + "items": [ + { + "kind": "ServiceAccount", + "apiVersion": "v1", + "metadata": { + "name": "jws-service-account" + }, + "secrets": [ + { + "name": "jws-app-secret" + } + ] + }, + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Default secret files with password 'mycertpass'" + }, + "name": "jws-app-secret" + }, + "data": { + "server.crt": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lKQUlVcmJBS2pjd3lkTUEwR0NTcUdTSWIzRFFFQkJRVUFNQ3d4Q3pBSkJnTlYKQkFZVEFrTkJNUXN3Q1FZRFZRUUlFd0pDUXpFUU1BNEdBMVVFQ2hNSFVtVmtJRWhoZERBZUZ3MHhOVEExTWpNdwpNalE0TWpCYUZ3MHhPREExTWpJd01qUTRNakJhTUN3eEN6QUpCZ05WQkFZVEFrTkJNUXN3Q1FZRFZRUUlFd0pDClF6RVFNQTRHQTFVRUNoTUhVbVZrSUVoaGREQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0MKZ2dFQkFMejlGcWYwbFV0eHBXbFlqQVF3MGNPVU5NMmF0SjFjWmMybEx5UEh3NVVYMkNna29jaHhuY0ZhSDZsYwpSeWVvUC9KQXVlM2d2cjJ0ZURicTRGMitRRWRFL3hrM0xLZEVhVXhwaGgvaVpDYUg0VnlXdzE3Y01aaS83UFUxCkhTak1waEVoK3MwQ1JIZnBCSVZDZ2hFQjZCekhYazNLSHU1K0JsRTYzRDVOSXJuNVBiRGhwUG5VaG1vY05HNU0KdFlEZE9mRHphTVdzcWU2ZGFkL0c3eXpDeVZYMnA2RllIbEdvQWpDTkVPdkFtTEFtMVcwNDdOOGV0QjIybFA3NQozK2dYby83T2Q1WHJJU0xLWTBNZ0puaDdmVEh1VVdhWEIxSEZ0YmFReXVreGVYZG1wRFR4VkdzenhHVXIxemd4ClU4VVFnMjFGOFhONjVydGNScVdCNWNFQXcrRUNBd0VBQWFPQmpqQ0JpekFkQmdOVkhRNEVGZ1FVVXJZUm5NOXIKemNGUS9QMFk4VlhzUm1OWVlUUXdYQVlEVlIwakJGVXdVNEFVVXJZUm5NOXJ6Y0ZRL1AwWThWWHNSbU5ZWVRTaApNS1F1TUN3eEN6QUpCZ05WQkFZVEFrTkJNUXN3Q1FZRFZRUUlFd0pDUXpFUU1BNEdBMVVFQ2hNSFVtVmtJRWhoCmRJSUpBSVVyYkFLamN3eWRNQXdHQTFVZEV3UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUZCUUFEZ2dFQkFEeGcKckh1NFpoeXU4UjlZWGpLZVBXaXUxQnZYUVhJRG5DQVZiZXlMNHZ5WHczdFZTeng1S3BuaHprTm5zemxpS1RQRAozOWNNbTJ0VTBUcGpHRVZ1TDVTR0R5V3JRakNQWDY2NjZpdjhzNkpVYUxmdnBPNlduN0wzVXVMVnFoMmpVM2FOCkU2TDB4UEg4SWRzc3lGZ3Rac3BueVlxZ0cvYUoxUEtmZDYweXRtK0hveUYyZW5xZnVKaHNKNnp3QUVZdi9vUCsKcXhYK04yZDJEVmt2RjVRZ3llSjZiYWd1M0QxbWRpbE5pWDliZmVHZ3ptT1JXaHBYNkVqdnFqeTZ4TllqQ2IxbAo1anVzdHhHZlFGSm1UOTdaZnZQK1kxeTJvUkZFc01BVy9BNWNrai92SnlUUlZIeWZZSG1qeW82SFhSdzRWdERNCjJtOWZqOFhYTDh5TlZha2h6dE09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + "server.key": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpQcm9jLVR5cGU6IDQsRU5DUllQVEVECkRFSy1JbmZvOiBERVMtRURFMy1DQkMsODRGOEVFQjcyN0FCNzc4RgoKS2lkL0tKZ2JyWE1YUGwwQXc4azI0dW1QcW53NUhQZHk3ZUZ2aG1NNEU3T1V2bkZYeWpMN2dQZ0JjTmxTbEx2Rgp6eXZYVWZwUXROQ2dUWWJvU0dWVFpkV01OL0ttcTNheUJTckNvWGg3MUFMWm1KVDF4aDNCcWUyMzVNRzZlR2NTCngrY3hudW0yMXNWRFBHUnBmZUx4VXA3WU5mOTNwYzBNaTUzSVdmT0IvM2FscDJlajI0RkRTVG1xM0NpVFR1b3cKcUc2MjdjREY2RVNhNFphZmFXelpOVDJnUStrejRGQkcwc1dJYWVod012SXJiQWFldGpUSUhQTmZraUtOcTF3Qwpma2h0NU9xRDU0RzVnR1ZYd0tXcGF4S2M0K24rcEtsZk1od2NPQ3Z5Y2hwQmtQbzk0T2E2bE1oVUk3Y0FkemVOCmI0TmNVWS9UekxpNjgyRjlFZUpsOXJuQ0N2N0RJcEZJbkg5bG4zUElVT2pCYVlySFpqY0pSK2xWOFhWRVJzcTAKZ0hyTUZ2WjRXYXZPclhVaFhxTm10UmNSOVpoS3pvVnB5MStIRUlONTZIL2huaFB5ZEJLb3pnbXFwSFJoWVZwNApNWEk0ZGtlMVZ5VmlRQUMxUzU3Wi81anFTZTN5akRyYTZJMThJRzNPSGNxeEpuWXNNOS9NdVBRZjlJNlJWTEx3CnVXcDNDdkIvNUJyWlNQWVFGbnhCT09hLzd3SWVtNjE2Z2ZnREdCRjZpc3BVNTlhbjlKQWtHYmhKT2h4RzYvRjYKZFpNZUZOa0lkRUwzR3pQRWNjNWpVTGJmSE5tWmVsODlYRUVpak96LzhvVEdHYUNyRkUxVVZKLytZSVVzK3lnOQpwRFJ6bkxyV2hwNGxNTnVLYjRNeXkySlkrWW9DOHhMVUtaYnNYcUxlN2k5QTJwYWtFUmdhazZaK1BEZUR2OWpYCnU0M3dzU0dwRTRUeDFkR2JsRzJDd0pPSW53WnpETUEwTDliWm8xemY4bUZGN0FmejlCbjNMOWMybTVJUDlZQ1AKQ1dVcFA4S3AzbEhzQkpQWWl4VTJ3TlZWSGs1Vjlla2R3aEl3Mlc3d0gyOTB6azZFTXJvZ2NoMjdKTFk0MlBzOApyRDl6Wm5vbEgrN1pPa3lLQ1UxVWJHWUJFSUtvZVFGa1FCek01aEpFT0xGVXd0YmdvZE5YYS9oMStXMjdkaHd1CnIyMG1tb3lCRGpNNWdGVVh6dGpqNDZMakZuNE5iRmtwbkNNT1VDNVM2bkIyeTlONnoxUWRSekVPRy80cU5xVFkKRzY3c2szemlqVzFISkxJVGorMVNPZHdIWktSUFl3cGxNTkY4TFRKVlBOYVQ5L3FDVGV3bXl4WUpIVjljVWhuSwpMQUZZYXNKVG9HZytEQnNJL21GbDY3WHNhWnp1cUdSSksydnVvNG9jWjJNb250L3BSSXRwQStaUFNpREFqTzNjCm9kTW5LRmhkRkpMSTZZTXVja1RySGQycXREQmdPaEEyZjk3ajArckVGeWVNOHY2eGhGN0I4cEtscW5TQU01M2kKbGl1ZHh1NzA2RWNQQ3JlU3pPRjF5bWMrQnBmVjVtSnU0U1hEcUF4aEYzZFFPbEg5TDdPVmJtRDU4dUs0eDNldwpXQ05Qa2FGc1UxRThJR2M4UGVqc1kwdHUxdWtoK3ZMYnlNQlBVRXRxaEgrNFhNeUVJL0xjUVhIRlkybjBEcmZDCmxTclM3T3FKcG
hPbW1VSWFwNGZLK0QzcldzZmFpSm9wc2Yxc044MEdLeVptSFY1TWtEY0k3NXdTWWp1RXhvcGgKVW0wcmJxMDdVVVVFR0tYL1JIQWhWS21nek5JMEZpYUZtbkhHOUw0bWZwclFHd2J5dTR1RktLUGtjOGxodlVwRAp6Z29SMTZQSUM4THpvcE5qTmJPem0xV0RUd1FxaUt5NWRsVWlWZlFRRlNmd09vcDMzakhvQXUzVWxYU3Bwby9vCnJnbGIxUUtBdm5zUEVFaG9HZUZpenFIWXhLVjZpT1hET3ZCcFhOcC8yMVB0OHpRbWVXUXZRZmhyOEc0cEpkVVcKRTFBWlc1b2JWWG1seWc4OWRML0pMTG02VFVNdUZaZFgxRlRNaVphangvQklqekhFVjdRNkhJZWFHYjBRb2dFZwotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=" + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json new file mode 100644 index 000000000..bb5bbb134 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json @@ -0,0 +1,359 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS applications built using STI." + }, + "name": "jws-tomcat7-basic-sti" + }, + "labels": { + "template": "jws-tomcat7-basic-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": 
"${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json new file mode 100644 index 000000000..86d4d3d25 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json @@ -0,0 +1,573 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MongoDB applications with persistent storage built using STI." + }, + "name": "jws-tomcat7-mongodb-persistent-sti" + }, + "labels": { + "template": "jws-tomcat7-mongodb-persistent-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC" + }, + { + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES" + }, + { + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." 
+ } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + 
"name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mongodb-24:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mongodb-tcp-27017", + "containerPort": 27017, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mongodb-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mongodb-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json new file mode 100644 index 000000000..696587cf3 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json @@ -0,0 +1,536 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for 
JWS MongoDB applications built using STI." + }, + "name": "jws-tomcat7-mongodb-sti" + }, + "labels": { + "template": "jws-tomcat7-mongodb-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC" + }, + { + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES" + }, + { + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + 
"command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mongodb-24:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mongodb-tcp-27017", + "containerPort": 27017, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json new file mode 100644 index 000000000..3ff5a712e --- /dev/null +++ 
b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json @@ -0,0 +1,574 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MySQL applications with persistent storage built using STI." + }, + "name": "jws-tomcat7-mysql-persistent-sti" + }, + "labels": { + "template": "jws-tomcat7-mysql-persistent-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS" + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN" + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN" + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + 
"apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + 
"secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mysql-55:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-mysql-tcp-3306", + "containerPort": 3306, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mysql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json new file mode 100644 index 000000000..872e13f95 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json @@ -0,0 +1,537 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MySQL applications built using STI." + }, + "name": "jws-tomcat7-mysql-sti" + }, + "labels": { + "template": "jws-tomcat7-mysql-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS" + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN" + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN" + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ 
+ "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mysql-55:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-mysql-tcp-3306", + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json new file mode 100644 index 000000000..b22cce6fd --- /dev/null +++ 
b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json @@ -0,0 +1,550 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications with persistent storage built using STI." + }, + "name": "jws-tomcat7-postgresql-persistent-sti" + }, + "labels": { + "template": "jws-tomcat7-postgresql-persistent-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "The maximum number of client connections allowed. 
This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS" + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": 
{ + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-postgresql-92:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-postgresql-tcp-5432", + "containerPort": 5432, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json new file mode 100644 index 000000000..43be3c3fe --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json @@ -0,0 +1,513 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications built using STI." + }, + "name": "jws-tomcat7-postgresql-sti" + }, + "labels": { + "template": "jws-tomcat7-postgresql-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS" + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." 
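A usage note on the parameters above: every parameter that carries "generate": "expression" (the database credentials, the JWS admin account, and the two build trigger secrets) is filled in at processing time with a random string matching its "from" regular expression, while GIT_URI has no default and needs to be passed for the build to have a source repository. A minimal sketch of instantiating this template with a current oc client (the application name and repository URL are placeholders; older 3.x clients spelled the parameter flag -v rather than -p):

    # process the template and create the resulting objects in the current project
    oc process -f jws-tomcat7-postgresql-sti.json \
        -p APPLICATION_NAME=myapp \
        -p GIT_URI=https://example.com/myorg/myapp.git \
      | oc create -f -
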
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": 
"${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-postgresql-92:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-postgresql-tcp-5432", + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json new file mode 100644 index 000000000..1d45b4214 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json @@ -0,0 +1,359 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS applications built using STI." + }, + "name": "jws-tomcat8-basic-sti" + }, + "labels": { + "template": "jws-tomcat8-basic-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
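Worth spelling out once, since every JWS template in this set repeats the pattern: the https route uses passthrough termination, so TLS is not terminated at the router but by Tomcat itself, using the certificate and key mounted from the secret named by JWS_HTTPS_SECRET (default jws-app-secret) into /etc/jws-secret-volume. The web pods also run under the jws-service-account service account, so both the secret and that service account are expected to exist in the project before the deployment can come up. A hedged sketch of preparing the secret with a current client, assuming server.crt and server.key already exist locally (older clients used oc secrets new instead):

    # file names inside the secret must match the JWS_HTTPS_CERTIFICATE* defaults
    oc create secret generic jws-app-secret \
        --from-file=server.crt=server.crt \
        --from-file=server.key=server.key
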
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json new file mode 100644 index 000000000..693cfaa0f --- 
/dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json @@ -0,0 +1,573 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MongoDB applications with persistent storage built using STI." + }, + "name": "jws-tomcat8-mongodb-persistent-sti" + }, + "labels": { + "template": "jws-tomcat8-mongodb-persistent-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC" + }, + { + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES" + }, + { + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": 
"${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, 
+ "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mongodb-24:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mongodb-tcp-27017", + "containerPort": 27017, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ], + 
"volumes": [ + { + "name": "${APPLICATION_NAME}-mongodb-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mongodb-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json new file mode 100644 index 000000000..8d27e8d94 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json @@ -0,0 +1,536 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MongoDB applications built using STI." + }, + "name": "jws-tomcat8-mongodb-sti" + }, + "labels": { + "template": "jws-tomcat8-mongodb-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC" + }, + { + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES" + }, + { + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." 
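One assumption worth making explicit here: the ${APPLICATION_NAME}-mongodb Service defined above is how the web tier finds the database. Kubernetes injects discovery variables for that service into the application pods (for a service named jws-app-mongodb, JWS_APP_MONGODB_SERVICE_HOST and the matching _SERVICE_PORT), and, as far as the xPaaS images document it, the DB_SERVICE_PREFIX_MAPPING value set further down ("${APPLICATION_NAME}-mongodb=DB") is what tells the JWS launch scripts to combine that service address with DB_USERNAME, DB_PASSWORD, and DB_DATABASE into the datasource named by DB_JNDI. A quick, hypothetical way to inspect the injected variables from a running pod (the pod name is a placeholder):

    # list the discovery and credential variables visible to the application container
    oc exec jws-app-1-abcde -- env | grep -iE 'mongodb|^DB_'
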
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": 
"JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mongodb-24:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "registry.access.redhat.com/openshift3_beta/mongodb-24-rhel7:latest", + "imagePullPolicy": "Always", + "ports": [ + { + "name": "${APPLICATION_NAME}-mongodb-tcp-27017", + "containerPort": 27017, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json new file mode 100644 index 000000000..1abf4e8cd --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json @@ -0,0 +1,574 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MySQL applications with persistent storage built using STI." + }, + "name": "jws-tomcat8-mysql-persistent-sti" + }, + "labels": { + "template": "jws-tomcat8-mysql-persistent-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS" + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN" + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN" + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." 
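A note on the persistent variants such as this one: the PersistentVolumeClaim created at the end of the template asks for ${VOLUME_CAPACITY} (512Mi by default) with ReadWriteOnce access, and the MySQL pod mounts it at /var/lib/mysql/data. The claim only binds if the cluster already has a PersistentVolume able to satisfy it, so an administrator has to provision one first. A minimal sketch, suitable only for single-node testing, with a hypothetical name and hostPath:

    # test-only PersistentVolume; real deployments would use NFS or another shared backend
    oc create -f - <<'EOF'
    {
      "apiVersion": "v1",
      "kind": "PersistentVolume",
      "metadata": { "name": "pv001" },
      "spec": {
        "capacity": { "storage": "512Mi" },
        "accessModes": [ "ReadWriteOnce" ],
        "hostPath": { "path": "/var/lib/openshift/pv001" }
      }
    }
    EOF

Once the template is instantiated, oc get pvc should show ${APPLICATION_NAME}-mysql-claim as Bound.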
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ 
+ "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mysql-55:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-mysql-tcp-3306", + "containerPort": 3306, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mysql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": 
"${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json new file mode 100644 index 000000000..1944d3557 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json @@ -0,0 +1,537 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS MySQL applications built using STI." + }, + "name": "jws-tomcat8-mysql-sti" + }, + "labels": { + "template": "jws-tomcat8-mysql-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS" + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN" + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN" + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github 
trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
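The readiness probe that appears in each of these web-tier DeploymentConfigs (including the one just below) shells out to curl against Tomcat's manager jmxproxy endpoint with the generated JWS_ADMIN_USERNAME and JWS_ADMIN_PASSWORD, and only reports the pod ready once Catalina's stateName is STARTED; until then the http and https Services keep the pod out of rotation. The same check can be reproduced by hand from a running pod (the pod name is a placeholder):

    # run the probe's own command manually; the credentials are already set in the container
    oc exec jws-app-1-abcde -- bash -c \
      'curl -s -u "$JWS_ADMIN_USERNAME:$JWS_ADMIN_PASSWORD" "http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName"'
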
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + 
"secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-mysql-55:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "registry.access.redhat.com/openshift3_beta/mysql-55-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-mysql-tcp-3306", + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json new file mode 100644 index 000000000..619895655 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json @@ -0,0 +1,550 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications with persistent storage built using STI." + }, + "name": "jws-tomcat8-postgresql-persistent-sti" + }, + "labels": { + "template": "jws-tomcat8-postgresql-persistent-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS" + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." 
+ } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": "${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + 
"name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-postgresql-92:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-postgresql-tcp-5432", + "containerPort": 5432, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ "ReadWriteOnce" ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json new file mode 100644 index 000000000..0e269d53f --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json @@ -0,0 +1,513 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass" : "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications built using STI." + }, + "name": "jws-tomcat8-postgresql-sti" + }, + "labels": { + "template": "jws-tomcat8-postgresql-sti" + }, + "parameters": [ + { + "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", + "name": "JWS_RELEASE", + "value": "3.0" + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "jws-app" + }, + { + "description": "Hostname for service routes", + "name": "APPLICATION_HOSTNAME", + "value": "jws-app.local" + }, + { + "description": "Git source URI for application", + "name": "GIT_URI" + }, + { + "description": "Git branch/tag reference", + "name": "GIT_REF", + "value": "master" + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "GIT_CONTEXT_DIR", + "value": "" + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "" + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root" + }, + { + "description": "The name of the secret containing the certificate files", + "name": "JWS_HTTPS_SECRET", + "value": "jws-app-secret" + }, + { + "description": "The name of the certificate file within the secret", + "name": "JWS_HTTPS_CERTIFICATE", + "value": "server.crt" + }, + { + "description": "The name of the certificate key file within the secret", + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "server.key" + }, + { + "description": "The certificate password", + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "" + }, + { + "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS" + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS" + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression" + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin User", + "name": "JWS_ADMIN_USERNAME", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "JWS Admin Password", + "name": "JWS_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Github trigger secret", + "name": "GITHUB_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_TRIGGER_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression" + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-http-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-https-service", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http-route", + "metadata": { + "name": "${APPLICATION_NAME}-http-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-http-service" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https-route", + "metadata": { + "name": "${APPLICATION_NAME}-https-route", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${APPLICATION_HOSTNAME}", + "to": { + "name": "${APPLICATION_NAME}-https-service" + }, + "tls": { + "termination" : "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${GIT_URI}", + "ref": "${GIT_REF}" + }, + "contextDir":"${GIT_CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + } + } + }, + "output": { + "to": { + "name": "${APPLICATION_NAME}" + } + }, + "triggers": [ + { + "type": "github", + "github": { + "secret": "${GITHUB_TRIGGER_SECRET}" + } + }, + { + "type": "generic", + "generic": { + "secret": "${GENERIC_TRIGGER_SECRET}" + } + }, + { + "type": "imageChange", + "imageChange": {} + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "jws-service-account", + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'" + ] + } + }, + "volumeMounts": [ + { + "name": "jws-certificate-volume", + "mountPath": "/etc/jws-secret-volume", + "readOnly": true + } + ], + "ports": [ + { + "name": 
"${APPLICATION_NAME}-tcp-8080", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "${APPLICATION_NAME}-tcp-8443", + "containerPort": 8443, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_DIR", + "value": "/etc/jws-secret-volume" + }, + { + "name": "JWS_HTTPS_CERTIFICATE", + "value": "${JWS_HTTPS_CERTIFICATE}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_KEY", + "value": "${JWS_HTTPS_CERTIFICATE_KEY}" + }, + { + "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", + "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}" + }, + { + "name": "JWS_ADMIN_USERNAME", + "value": "${JWS_ADMIN_USERNAME}" + }, + { + "name": "JWS_ADMIN_PASSWORD", + "value": "${JWS_ADMIN_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "jws-certificate-volume", + "secret": { + "secretName": "${JWS_HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "jboss-postgresql-92:latest" + } + } + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "registry.access.redhat.com/openshift3_beta/postgresql-92-rhel7:latest", + "ports": [ + { + "name": "${APPLICATION_NAME}-postgresql-tcp-5432", + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml new file mode 100644 index 000000000..5cfda1c89 --- /dev/null +++ b/roles/openshift_examples/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Scott Dodson + description: OpenShift Examples + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.7 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: openshift_common diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml new file mode 100644 index 000000000..da8f603d7 --- /dev/null +++ b/roles/openshift_examples/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Copy openshift examples + synchronize: src=examples dest=/usr/share/openshift + +# RHEL and Centos image streams are mutually exclusive +- name: Import RHEL streams + command: > + {{ openshift.common.client_binary }} create -n openshift -f {{ rhel_image_streams }} + when: openshift_examples_load_rhel + register: oex_import_rhel_streams + failed_when: "'already exists' not in oex_import_rhel_streams.stderr and oex_import_rhel_streams.rc != 0" + changed_when: false + +- name: Import Centos Image streams + command: > + {{ openshift.common.client_binary }} create -n openshift -f {{ centos_image_streams }} + when: openshift_examples_load_centos | bool + register: oex_import_centos_streams + failed_when: "'already exists' not in oex_import_centos_streams.stderr and oex_import_centos_streams.rc != 0" + changed_when: false + +- name: Import db templates + command: > + {{ openshift.common.client_binary }} create -n openshift -f {{ db_templates_base }} + when: openshift_examples_load_db_templates | bool + register: oex_import_db_templates + failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0" + changed_when: false + +- name: Import quickstart-templates + command: > + {{ openshift.common.client_binary }} create -n openshift -f {{ quickstarts_base }} + when: openshift_examples_load_quickstarts + register: oex_import_quickstarts + failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" + changed_when: false + + +- name: Import xPaas image streams + command: > + {{ openshift.common.client_binary }} create -n openshift -f {{ xpaas_image_streams }} + when: openshift_examples_load_xpaas | bool + register: oex_import_xpaas_streams + failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0" + changed_when: false + +- name: Import xPaas templates + command: > + {{ openshift.common.client_binary }} create -n openshift -f {{ xpaas_templates_base }} + when: openshift_examples_load_xpaas | bool + register: oex_import_xpaas_templates + failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0" + changed_when: false diff --git a/roles/openshift_examples/templates.sh b/roles/openshift_examples/templates.sh new file mode 100755 index 000000000..4f3050494 --- /dev/null +++ b/roles/openshift_examples/templates.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +EXAMPLES_BASE=$(pwd)/files/examples +find files/examples -name '*.json' -delete +TEMP=`mktemp -d` +pushd $TEMP +wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip +wget https://github.com/openshift/django-ex/archive/master.zip -O django-ex-master.zip +wget https://github.com/openshift/rails-ex/archive/master.zip -O rails-ex-master.zip +wget https://github.com/openshift/nodejs-ex/archive/master.zip -O nodejs-ex-master.zip +wget https://github.com/openshift/dancer-ex/archive/master.zip -O dancer-ex-master.zip +wget https://github.com/openshift/cakephp-ex/archive/master.zip -O cakephp-ex-master.zip +wget 
https://github.com/jboss-openshift/application-templates/archive/master.zip -O application-templates-master.zip +unzip origin-master.zip +unzip django-ex-master.zip +unzip rails-ex-master.zip +unzip nodejs-ex-master.zip +unzip dancer-ex-master.zip +unzip cakephp-ex-master.zip +unzip application-templates-master.zip +cp origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/ +cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/ +cp django-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ +cp rails-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ +cp nodejs-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ +cp dancer-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ +cp cakephp-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ +mv application-templates-master/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/ +find application-templates-master/ -name '*.json' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \; +popd +git diff files/examples -- cgit v1.2.3 From df51a7dddad9e6f93a24c3ec07a07a661e6e168a Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Fri, 26 Jun 2015 15:04:56 -0400 Subject: delegate_to doesn't appear to be thread safe --- playbooks/common/openshift-node/config.yml | 25 +++++++++++++++++++++++++ roles/openshift_node/tasks/main.yml | 21 --------------------- 2 files changed, 25 insertions(+), 21 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 2d2560db4..d3c223f50 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -135,3 +135,28 @@ roles: - os_env_extras - os_env_extras_node + +- name: Set scheduleability + serial: 1 + hosts: oo_nodes_to_config + tasks: + - name: Check scheduleable state + delegate_to: "{{ openshift_first_master }}" + command: > + {{ openshift.common.client_binary }} get node {{ openshift.common.hostname }} + register: ond_get_node + until: ond_get_node.rc == 0 + retries: 10 + delay: 5 + + - name: Handle unscheduleable node + delegate_to: "{{ openshift_first_master }}" + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname }} --schedulable=false + when: openshift_scheduleable is defined and openshift_scheduleable == False and "SchedulingDisabled" not in ond_get_node.stdout + + - name: Handle scheduleable node + delegate_to: "{{ openshift_first_master }}" + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname }} --schedulable=true + when: (openshift_scheduleable is not defined or openshift_scheduleable == True) and "SchedulingDisabled" in ond_get_node.stdout diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 13f30a6f8..770b55351 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -73,24 +73,3 @@ - name: Start and enable openshift-node service: name=openshift-node enabled=yes state=started - -- name: Check scheduleable state - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.client_binary }} get node {{ openshift.common.hostname }} - register: ond_get_node - until: ond_get_node.rc == 0 - retries: 10 - delay: 5 - -- name: Handle unscheduleable node - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname 
}} --schedulable=false - when: openshift_scheduleable is defined and openshift_scheduleable == False and "SchedulingDisabled" not in ond_get_node.stdout - -- name: Handle scheduleable node - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname }} --schedulable=true - when: (openshift_scheduleable is not defined or openshift_scheduleable == True) and "SchedulingDisabled" in ond_get_node.stdout -- cgit v1.2.3 From cde074730ed8278673498157008651d192c8236a Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Fri, 26 Jun 2015 16:57:28 -0400 Subject: The manage_node commands should only run on the first master --- filter_plugins/oo_filters.py | 7 +++--- playbooks/common/openshift-node/config.yml | 36 +++++++++++------------------- roles/openshift_manage_node/tasks/main.yml | 11 +++++++++ 3 files changed, 28 insertions(+), 26 deletions(-) create mode 100644 roles/openshift_manage_node/tasks/main.yml (limited to 'playbooks') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index b7248efaa..0f3f4fa9e 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -52,8 +52,9 @@ class FilterModule(object): @staticmethod def oo_collect(data, attribute=None, filters=None): ''' This takes a list of dict and collects all attributes specified into a - list If filter is specified then we will include all items that match - _ALL_ of filters. + list. If filter is specified then we will include all items that + match _ALL_ of filters. If a dict entry is missing the key in a + filter it will be excluded from the match. Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return {'a':2, 'z': 'z'}, # True, return {'a':3, 'z': 'z'}, # True, return @@ -74,7 +75,7 @@ class FilterModule(object): raise errors.AnsibleFilterError("|fialed expects filter to be a" " dict") retval = [FilterModule.get_attr(d, attribute) for d in data if ( - all([d[key] == filters[key] for key in filters]))] + all([d.get(key, None) == filters[key] for key in filters]))] else: retval = [FilterModule.get_attr(d, attribute) for d in data] diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index d3c223f50..0eec1ae61 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -137,26 +137,16 @@ - os_env_extras_node - name: Set scheduleability - serial: 1 - hosts: oo_nodes_to_config - tasks: - - name: Check scheduleable state - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.client_binary }} get node {{ openshift.common.hostname }} - register: ond_get_node - until: ond_get_node.rc == 0 - retries: 10 - delay: 5 - - - name: Handle unscheduleable node - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname }} --schedulable=false - when: openshift_scheduleable is defined and openshift_scheduleable == False and "SchedulingDisabled" not in ond_get_node.stdout - - - name: Handle scheduleable node - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname }} --schedulable=true - when: (openshift_scheduleable is not defined or openshift_scheduleable == True) and "SchedulingDisabled" in ond_get_node.stdout + hosts: oo_first_master + vars: + openshift_unscheduleable_nodes: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | 
oo_collect('openshift_hostname', {'openshift_scheduleable': False}) }}" + pre_tasks: + - set_fact: + openshift_scheduleable_nodes: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('openshift_hostname') + | difference(openshift_unscheduleable_nodes) }}" + roles: + - openshift_manage_node diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml new file mode 100644 index 000000000..d4c623f10 --- /dev/null +++ b/roles/openshift_manage_node/tasks/main.yml @@ -0,0 +1,11 @@ +- name: Handle unscheduleable node + delegate_to: "{{ openshift_first_master }}" + command: > + {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=false + with_items: openshift_unscheduleable_nodes + +- name: Handle scheduleable node + delegate_to: "{{ openshift_first_master }}" + command: > + {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true + with_items: openshift_scheduleable_nodes -- cgit v1.2.3 From 93c2bf00cd766771455e82a3fb9fd56d1a1c8dd5 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Tue, 30 Jun 2015 16:54:12 -0400 Subject: Using openshift.common.hostname instead of openshift_hostname for determining node scheduleability --- playbooks/common/openshift-node/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 0eec1ae61..2017a7156 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -141,12 +141,12 @@ vars: openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift_hostname', {'openshift_scheduleable': False}) }}" + | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" pre_tasks: - set_fact: openshift_scheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift_hostname') + | oo_collect('openshift.common.hostname') | difference(openshift_unscheduleable_nodes) }}" roles: - openshift_manage_node -- cgit v1.2.3 From 5fe2016d158d00729ba16b3ef3cc130cecd93620 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Tue, 30 Jun 2015 22:23:03 -0400 Subject: Updates to ans module examples --- playbooks/adhoc/noc/create_maintenance.yml | 36 +++++++++++++++++++++++++++++ playbooks/adhoc/noc/get_zabbix_problems.yml | 2 +- 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 playbooks/adhoc/noc/create_maintenance.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml new file mode 100644 index 000000000..c0ec57ce1 --- /dev/null +++ b/playbooks/adhoc/noc/create_maintenance.yml @@ -0,0 +1,36 @@ +--- +#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml +- name: 'Create a maintenace object in zabbix' + hosts: localhost + gather_facts: no + roles: + - os_zabbix + vars: + oo_hostids: '' + oo_groupids: '' + post_tasks: + - assert: + that: oo_desc is defined + + - zbxapi: + server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php + zbx_class: Maintenance + state: present + params: + name: "{{ oo_name }}" + description: "{{ oo_desc }}" + active_since: "{{ oo_start }}" + active_till: "{{ oo_stop }}" + maintenance_type: "0" + output: extend + hostids: "{{ oo_hostids.split(',') | 
default([]) }}" +#groupids: "{{ oo_groupids.split(',') | default([]) }}" + timeperiods: + - start_time: "{{ oo_start }}" + period: "{{ oo_stop }}" + selectTimeperiods: extend + + register: maintenance + + - debug: var=maintenance + diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml index 02bffc1d2..4b94fa228 100644 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ b/playbooks/adhoc/noc/get_zabbix_problems.yml @@ -11,7 +11,7 @@ - zbxapi: server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php zbx_class: Trigger - action: get + state: list params: only_true: true output: extend -- cgit v1.2.3 From 3c48b582bf63fdf46efb2eb644f3adac313ffd6d Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Sun, 7 Jun 2015 23:08:55 +0200 Subject: Add a generic mechanism for passing options And use it in the libvirt and openstack playbooks --- README_libvirt.md | 10 +++ bin/cluster | 2 +- lookup_plugins/oo_option.py | 73 ++++++++++++++++++++++ playbooks/libvirt/openshift-cluster/lookup_plugins | 1 + .../openshift-cluster/tasks/launch_instances.yml | 1 + playbooks/libvirt/openshift-cluster/vars.yml | 9 ++- .../openstack/openshift-cluster/lookup_plugins | 1 + playbooks/openstack/openshift-cluster/vars.yml | 43 ++++++++----- 8 files changed, 119 insertions(+), 21 deletions(-) create mode 100644 lookup_plugins/oo_option.py create mode 120000 playbooks/libvirt/openshift-cluster/lookup_plugins create mode 120000 playbooks/openstack/openshift-cluster/lookup_plugins (limited to 'playbooks') diff --git a/README_libvirt.md b/README_libvirt.md index 92f0b3dc9..60af0ac88 100644 --- a/README_libvirt.md +++ b/README_libvirt.md @@ -102,6 +102,16 @@ Test The Setup bin/cluster list libvirt '' ``` +Configuration +------------- + +The following options can be passed via the `-o` flag of the `create` command or as environment variables: + +* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2`): URL of the QCOW2 image to download +* `image_name` (default to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on +* `image_sha256` (default to `e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab`): Expected SHA256 checksum of the downloaded image +* `skip_image_download` (default to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible` + Creating a cluster ------------------ diff --git a/bin/cluster b/bin/cluster index cb8ff0439..720dd230c 100755 --- a/bin/cluster +++ b/bin/cluster @@ -174,7 +174,7 @@ class Cluster(object): if args.option: for opt in args.option: k, v = opt.split('=', 1) - env['opt_' + k] = v + env['cli_' + k] = v ansible_env = '-e \'{}\''.format( ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()]) diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py new file mode 100644 index 000000000..35dce48f9 --- /dev/null +++ b/lookup_plugins/oo_option.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +''' +oo_option lookup plugin for openshift-ansible + +Usage: + + - debug: + msg: "{{ lookup('oo_option', '') | default('', True) }}" + +This returns, by order of priority: + +* if it exists, the `cli_` ansible variable. 
This variable is set by `bin/cluster --option = …` +* if it exists, the envirnoment variable named `` +* if none of the above conditions are met, empty string is returned +''' + +from ansible.utils import template +import os + +# Reason: disable too-few-public-methods because the `run` method is the only +# one required by the Ansible API +# Status: permanently disabled +# pylint: disable=too-few-public-methods +class LookupModule(object): + ''' oo_option lookup plugin main class ''' + + # Reason: disable unused-argument because Ansible is calling us with many + # parameters we are not interested in. + # The lookup plugins of Ansible have this kwargs “catch-all” parameter + # which is not used + # Status: permanently disabled unless Ansible API evolves + # pylint: disable=unused-argument + def __init__(self, basedir=None, **kwargs): + ''' Constructor ''' + self.basedir = basedir + + # Reason: disable unused-argument because Ansible is calling us with many + # parameters we are not interested in. + # The lookup plugins of Ansible have this kwargs “catch-all” parameter + # which is not used + # Status: permanently disabled unless Ansible API evolves + # pylint: disable=unused-argument + def run(self, terms, inject=None, **kwargs): + ''' Main execution path ''' + + try: + terms = template.template(self.basedir, terms, inject) + # Reason: disable broad-except to really ignore any potential exception + # This is inspired by the upstream "env" lookup plugin: + # https://github.com/ansible/ansible/blob/devel/v1/ansible/runner/lookup_plugins/env.py#L29 + # pylint: disable=broad-except + except Exception: + pass + + if isinstance(terms, basestring): + terms = [terms] + + ret = [] + + for term in terms: + option_name = term.split()[0] + cli_key = 'cli_' + option_name + if inject and cli_key in inject: + ret.append(inject[cli_key]) + elif option_name in os.environ: + ret.append(os.environ[option_name]) + else: + ret.append('') + + return ret diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/libvirt/openshift-cluster/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 8291192ab..4cb494056 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -14,6 +14,7 @@ url: '{{ image_url }}' sha256sum: '{{ image_sha256 }}' dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' - name: Create the cloud-init config drive path file: diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index 65d954fee..e3c8cd8d0 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -7,9 +7,12 @@ libvirt_uri: 'qemu:///system' deployment_vars: origin: image: - url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2" - name: CentOS-7-x86_64-GenericCloud.qcow2 - sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab + url: "{{ lookup('oo_option', 'image_url') | + default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2', True) }}" + name: 
"{{ lookup('oo_option', 'image_name') | + default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}" + sha256: "{{ lookup('oo_option', 'image_sha256') | + default('e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab', True) }}" ssh_user: openshift sudo: yes online: diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/openstack/openshift-cluster/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index c754f19fc..1ae7c17d2 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -1,27 +1,36 @@ --- -openstack_infra_heat_stack: "{{ opt_infra_heat_stack | default('files/heat_stack.yml') }}" -openstack_network_prefix: "{{ opt_network_prefix | default('openshift-ansible-'+cluster_id) }}" -openstack_network_cidr: "{{ opt_net_cidr | default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24') }}" -openstack_network_external_net: "{{ opt_external_net | default('external') }}" -openstack_floating_ip_pools: "{{ opt_floating_ip_pools | default('external') | oo_split() }}" -openstack_network_dns: "{{ opt_dns | default('8.8.8.8,8.8.4.4') | oo_split() }}" -openstack_ssh_keypair: "{{ opt_keypair | default(lookup('env', 'LOGNAME')+'_key') }}" -openstack_ssh_public_key: "{{ lookup('file', opt_public_key | default('~/.ssh/id_rsa.pub')) }}" -openstack_ssh_access_from: "{{ opt_ssh_from | default('0.0.0.0/0') }}" +openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | + default('files/heat_stack.yml', True) }}" +openstack_network_prefix: "{{ lookup('oo_option', 'network_prefix' ) | + default('openshift-ansible-'+cluster_id, True) }}" +openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) | + default('192.168.' 
+ ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}" +openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) | + default('external', True) }}" +openstack_floating_ip_pools: "{{ lookup('oo_option', 'floating_ip_pools') | + default('external', True) | oo_split() }}" +openstack_network_dns: "{{ lookup('oo_option', 'dns' ) | + default('8.8.8.8,8.8.4.4', True) | oo_split() }}" +openstack_ssh_keypair: "{{ lookup('oo_option', 'keypair' ) | + default(lookup('env', 'LOGNAME')+'_key', True) }}" +openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') | + default('~/.ssh/id_rsa.pub', True)) }}" +openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') | + default('0.0.0.0/0', True) }}" openstack_flavor: master: - ram: "{{ opt_master_flavor_ram | default(2048) }}" - id: "{{ opt_master_flavor_id | default() }}" - include: "{{ opt_master_flavor_include | default() }}" + ram: "{{ lookup('oo_option', 'master_flavor_ram' ) | default(2048, True) }}" + id: "{{ lookup('oo_option', 'master_flavor_id' ) | default(True) }}" + include: "{{ lookup('oo_option', 'master_flavor_include') | default(True) }}" node: - ram: "{{ opt_node_flavor_ram | default(4096) }}" - id: "{{ opt_node_flavor_id | default() }}" - include: "{{ opt_node_flavor_include | default() }}" + ram: "{{ lookup('oo_option', 'node_flavor_ram' ) | default(4096, True) }}" + id: "{{ lookup('oo_option', 'node_flavor_id' ) | default(True) }}" + include: "{{ lookup('oo_option', 'node_flavor_include' ) | default(True) }}" deployment_vars: origin: image: - name: "{{ opt_image_name | default('centos-70-raw') }}" + name: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}" id: ssh_user: openshift sudo: yes @@ -33,7 +42,7 @@ deployment_vars: sudo: no enterprise: image: - name: "{{ opt_image_name | default('centos-70-raw') }}" + name: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}" id: ssh_user: openshift sudo: yes -- cgit v1.2.3 From aadcbc4507a489d4a4d0bfa451e9aa69f22b550f Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 9 Jul 2015 10:25:29 -0400 Subject: Latest docker ships docker-storage-setup --- .../aws/openshift-cluster/templates/user_data.j2 | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 index 7dbc8f552..aea43026f 100644 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -1,17 +1,4 @@ #cloud-config -yum_repos: - jdetiber-copr: - name: Copr repo for origin owned by jdetiber - baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/ - skip_if_unavailable: true - gpgcheck: true - gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg - enabled: true - -packages: -- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8 -- docker-storage-setup - mounts: - [ xvdb ] - [ ephemeral0 ] @@ -24,6 +11,6 @@ write_files: owner: root:root permissions: '0644' -runcmd: -- systemctl daemon-reload -- systemctl enable lvm2-lvmetad.service docker-storage-setup.service +{% if deployment_type == 'online' %} +disable_root: 0 +{% endif %} -- cgit v1.2.3 From b5cf492509fab422b5d22cd75ea6f938db2deaee Mon Sep 17 00:00:00 2001 From: Troy Dawson Date: Thu, 9 
Jul 2015 10:35:51 -0500 Subject: new libra 7.1 ami - updated packages and cloud-init installed --- playbooks/aws/ansible-tower/launch.yml | 2 +- playbooks/aws/openshift-cluster/vars.online.int.yml | 2 +- playbooks/aws/openshift-cluster/vars.online.prod.yml | 2 +- playbooks/aws/openshift-cluster/vars.online.stage.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml index 4bcc8b8dc..850238ffb 100644 --- a/playbooks/aws/ansible-tower/launch.yml +++ b/playbooks/aws/ansible-tower/launch.yml @@ -6,7 +6,7 @@ vars: inst_region: us-east-1 - rhel7_ami: ami-78756d10 + rhel7_ami: ami-9101c8fa user_data_file: user_data.txt vars_files: diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml index e115615d5..e406a7635 100644 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -1,5 +1,5 @@ --- -ec2_image: ami-78756d10 +ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml index e115615d5..e406a7635 100644 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -1,5 +1,5 @@ --- -ec2_image: ami-78756d10 +ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml index e115615d5..e406a7635 100644 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -1,5 +1,5 @@ --- -ec2_image: ami-78756d10 +ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -- cgit v1.2.3 From 76ad5ac0475a6d1d643b833d19aa0240b3ac95a5 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 9 Jul 2015 11:38:26 -0400 Subject: grow and resize /var partition for online deployment_type --- playbooks/aws/openshift-cluster/templates/user_data.j2 | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 index aea43026f..22cccd977 100644 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -1,4 +1,6 @@ #cloud-config +devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436 + mounts: - [ xvdb ] - [ ephemeral0 ] @@ -13,4 +15,9 @@ write_files: {% if deployment_type == 'online' %} disable_root: 0 +growpart: + mode: auto + devices: ['/var'] +runcmd: +- xfs_growfs /var {% endif %} -- cgit v1.2.3 From bcf750f83a87934027358a4631efa54bed73c05f Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 9 Jul 2015 14:49:57 -0400 Subject: include user_data template for all host types --- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 2 +- playbooks/aws/openshift-cluster/templates/user_data.j2 | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 060147659..d643b647d 100644 --- 
a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -51,7 +51,7 @@ - set_fact: latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}" - user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}" + user_data: "{{ lookup('template', '../templates/user_data.j2') }}" volume_defs: master: root: diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 index 22cccd977..db14bacd1 100644 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -1,6 +1,5 @@ #cloud-config -devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436 - +{% if type == 'node' %} mounts: - [ xvdb ] - [ ephemeral0 ] @@ -12,8 +11,11 @@ write_files: path: /etc/sysconfig/docker-storage-setup owner: root:root permissions: '0644' +{% endif %} {% if deployment_type == 'online' %} +devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436 + disable_root: 0 growpart: mode: auto -- cgit v1.2.3 From 1830191258b9148b6ce286fa63d30c41e048a146 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Thu, 9 Jul 2015 20:26:33 -0400 Subject: example create_host --- filter_plugins/oo_filters.py | 12 +++++++- playbooks/adhoc/noc/create_host.yml | 55 +++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 playbooks/adhoc/noc/create_host.yml (limited to 'playbooks') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 0f3f4fa9e..4e4a7309d 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -223,6 +223,15 @@ class FilterModule(object): # Gather up the values for the list of keys passed in return [x for x in data if x[filter_attr]] + @staticmethod + def oo_build_zabbix_list_dict(values, string): + ''' Build a list of dicts with string as key for each value + ''' + rval = [] + for value in values: + rval.append({string: value}) + return rval + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -235,5 +244,6 @@ class FilterModule(object): "oo_ec2_volume_definition": self.oo_ec2_volume_definition, "oo_combine_key_value": self.oo_combine_key_value, "oo_split": self.oo_split, - "oo_filter_list": self.oo_filter_list + "oo_filter_list": self.oo_filter_list, + "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict } diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml new file mode 100644 index 000000000..d250e6e69 --- /dev/null +++ b/playbooks/adhoc/noc/create_host.yml @@ -0,0 +1,55 @@ +--- +- name: 'Create a host object in zabbix' + hosts: localhost + gather_facts: no + roles: + - os_zabbix + post_tasks: + + - zbxapi: + server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php + zbx_class: Template + state: list + params: + host: ctr_test_kwoodson + filter: + host: + - ctr_kwoodson_test_tmpl + + register: tmpl_results + + - debug: var=tmpl_results + +#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml +- name: 'Create a host object in zabbix' + hosts: localhost + gather_facts: no + roles: + - os_zabbix + post_tasks: + + - zbxapi: + server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php + zbx_class: Host + state: absent + params: + host: ctr_test_kwoodson 
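# Worked example (template ids are hypothetical, shown only for illustration):
# the "templates" entry below is built by chaining two custom filters. If the
# Template query above returned objects with templateid values '10001' and
# '10002', oo_collect('templateid') yields ['10001', '10002'] and
# oo_build_zabbix_list_dict('templateid') rebuilds the list-of-dicts shape the
# Zabbix API expects: [{'templateid': '10001'}, {'templateid': '10002'}].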
+ interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 127.0.0.1 + dns: "" + port: 10050 + groups: + - groupid: 1 + templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}" + output: extend + filter: + host: + - ctr_test_kwoodson + + register: host_results + + - debug: var=host_results + -- cgit v1.2.3 From 025011c1e462d8419b81d8c5085cb92163ac4280 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 8 Jul 2015 23:19:27 -0400 Subject: Bug 1241342 - Adding retry logic to handle node registration race conditions --- playbooks/common/openshift-node/config.yml | 3 +++ roles/openshift_manage_node/tasks/main.yml | 9 +++++++++ 2 files changed, 12 insertions(+) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 2017a7156..1cf5616ce 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -139,6 +139,9 @@ - name: Set scheduleability hosts: oo_first_master vars: + openshift_nodes: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('openshift.common.hostname') }}" openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index b9ab16e7c..d17f3f532 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -1,3 +1,12 @@ +- name: Wait for Node Registration + command: > + {{ openshift.common.client_binary }} get node {{ item }} + register: omd_get_node + until: omd_get_node.rc == 0 + retries: 10 + delay: 5 + with_items: openshift_nodes + - name: Handle unscheduleable node command: > {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=false -- cgit v1.2.3 From 9cd8ad65fea0b637a4fbf709f419f9c11785c3e9 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 10 Jul 2015 11:48:41 -0400 Subject: Adding ansible upstream sequence plugin to work around the bug in 1.9.2 --- lookup_plugins/sequence.py | 215 ++++++++++++++++++++++ playbooks/aws/openshift-cluster/lookup_plugins | 1 + playbooks/byo/lookup_plugins | 1 + playbooks/byo/openshift-master/lookup_plugins | 1 + playbooks/byo/openshift-node/lookup_plugins | 1 + playbooks/common/openshift-cluster/lookup_plugins | 1 + playbooks/common/openshift-master/lookup_plugins | 1 + playbooks/common/openshift-node/lookup_plugins | 1 + playbooks/gce/openshift-cluster/lookup_plugins | 1 + 9 files changed, 223 insertions(+) create mode 100644 lookup_plugins/sequence.py create mode 120000 playbooks/aws/openshift-cluster/lookup_plugins create mode 120000 playbooks/byo/lookup_plugins create mode 120000 playbooks/byo/openshift-master/lookup_plugins create mode 120000 playbooks/byo/openshift-node/lookup_plugins create mode 120000 playbooks/common/openshift-cluster/lookup_plugins create mode 120000 playbooks/common/openshift-master/lookup_plugins create mode 120000 playbooks/common/openshift-node/lookup_plugins create mode 120000 playbooks/gce/openshift-cluster/lookup_plugins (limited to 'playbooks') diff --git a/lookup_plugins/sequence.py b/lookup_plugins/sequence.py new file mode 100644 index 000000000..8ca9e7b39 --- /dev/null +++ b/lookup_plugins/sequence.py @@ -0,0 +1,215 @@ +# (c) 2013, Jayson Vantuyl +# +# This file is part of Ansible +# +# Ansible is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.errors import AnsibleError +import ansible.utils as utils +from re import compile as re_compile, IGNORECASE + +# shortcut format +NUM = "(0?x?[0-9a-f]+)" +SHORTCUT = re_compile( + "^(" + # Group 0 + NUM + # Group 1: Start + "-)?" + + NUM + # Group 2: End + "(/" + # Group 3 + NUM + # Group 4: Stride + ")?" + + "(:(.+))?$", # Group 5, Group 6: Format String + IGNORECASE +) + + +class LookupModule(object): + """ + sequence lookup module + + Used to generate some sequence of items. Takes arguments in two forms. + + The simple / shortcut form is: + + [start-]end[/stride][:format] + + As indicated by the brackets: start, stride, and format string are all + optional. The format string is in the style of printf. This can be used + to pad with zeros, format in hexadecimal, etc. All of the numerical values + can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8). + Negative numbers are not supported. + + Some examples: + + 5 -> ["1","2","3","4","5"] + 5-8 -> ["5", "6", "7", "8"] + 2-10/2 -> ["2", "4", "6", "8", "10"] + 4:host%02d -> ["host01","host02","host03","host04"] + + The standard Ansible key-value form is accepted as well. For example: + + start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"] + + This format takes an alternate form of "end" called "count", which counts + some number from the starting value. For example: + + count=5 -> ["1", "2", "3", "4", "5"] + start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"] + start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"] + start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"] + + The count option is mostly useful for avoiding off-by-one errors and errors + calculating the number of entries in a sequence when a stride is specified. 
+ """ + + def __init__(self, basedir, **kwargs): + """absorb any keyword args""" + self.basedir = basedir + + def reset(self): + """set sensible defaults""" + self.start = 1 + self.count = None + self.end = None + self.stride = 1 + self.format = "%d" + + def parse_kv_args(self, args): + """parse key-value style arguments""" + for arg in ["start", "end", "count", "stride"]: + try: + arg_raw = args.pop(arg, None) + if arg_raw is None: + continue + arg_cooked = int(arg_raw, 0) + setattr(self, arg, arg_cooked) + except ValueError: + raise AnsibleError( + "can't parse arg %s=%r as integer" + % (arg, arg_raw) + ) + if 'format' in args: + self.format = args.pop("format") + if args: + raise AnsibleError( + "unrecognized arguments to with_sequence: %r" + % args.keys() + ) + + def parse_simple_args(self, term): + """parse the shortcut forms, return True/False""" + match = SHORTCUT.match(term) + if not match: + return False + + _, start, end, _, stride, _, format = match.groups() + + if start is not None: + try: + start = int(start, 0) + except ValueError: + raise AnsibleError("can't parse start=%s as integer" % start) + if end is not None: + try: + end = int(end, 0) + except ValueError: + raise AnsibleError("can't parse end=%s as integer" % end) + if stride is not None: + try: + stride = int(stride, 0) + except ValueError: + raise AnsibleError("can't parse stride=%s as integer" % stride) + + if start is not None: + self.start = start + if end is not None: + self.end = end + if stride is not None: + self.stride = stride + if format is not None: + self.format = format + + def sanity_check(self): + if self.count is None and self.end is None: + raise AnsibleError( + "must specify count or end in with_sequence" + ) + elif self.count is not None and self.end is not None: + raise AnsibleError( + "can't specify both count and end in with_sequence" + ) + elif self.count is not None: + # convert count to end + if self.count != 0: + self.end = self.start + self.count * self.stride - 1 + else: + self.start = 0 + self.end = 0 + self.stride = 0 + del self.count + if self.stride > 0 and self.end < self.start: + raise AnsibleError("to count backwards make stride negative") + if self.stride < 0 and self.end > self.start: + raise AnsibleError("to count forward don't make stride negative") + if self.format.count('%') != 1: + raise AnsibleError("bad formatting string: %s" % self.format) + + def generate_sequence(self): + if self.stride > 0: + adjust = 1 + else: + adjust = -1 + numbers = xrange(self.start, self.end + adjust, self.stride) + + for i in numbers: + try: + formatted = self.format % i + yield formatted + except (ValueError, TypeError): + raise AnsibleError( + "problem formatting %r with %r" % self.format + ) + + def run(self, terms, inject=None, **kwargs): + results = [] + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + for term in terms: + try: + self.reset() # clear out things for this iteration + + try: + if not self.parse_simple_args(term): + self.parse_kv_args(utils.parse_kv(term)) + except Exception: + raise AnsibleError( + "unknown error parsing with_sequence arguments: %r" + % term + ) + + self.sanity_check() + if self.stride != 0: + results.extend(self.generate_sequence()) + except AnsibleError: + raise + except Exception, e: + raise AnsibleError( + "unknown error generating sequence: %s" % str(e) + ) + + return results diff --git a/playbooks/aws/openshift-cluster/lookup_plugins 
b/playbooks/aws/openshift-cluster/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/aws/openshift-cluster/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/byo/lookup_plugins b/playbooks/byo/lookup_plugins new file mode 120000 index 000000000..c528bcd1d --- /dev/null +++ b/playbooks/byo/lookup_plugins @@ -0,0 +1 @@ +../../lookup_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-master/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-node/lookup_plugins b/playbooks/byo/openshift-node/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-node/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-cluster/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-master/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-node/lookup_plugins b/playbooks/common/openshift-node/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-node/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/gce/openshift-cluster/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/gce/openshift-cluster/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file -- cgit v1.2.3 From f08e64ac98a62863dfd7b7802338a0a7f4770188 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 10 Jul 2015 13:50:03 -0400 Subject: Remove outdated playbooks - Remove aws openshift-node and openshift-master playbooks - Rmove gce openshift-node and openshift-master playbooks - Consolidate aws terminate playbooks --- playbooks/aws/openshift-cluster/terminate.yml | 51 ++++++++++++++++++- playbooks/aws/openshift-master/config.yml | 19 ------- playbooks/aws/openshift-master/filter_plugins | 1 - playbooks/aws/openshift-master/launch.yml | 70 -------------------------- playbooks/aws/openshift-master/roles | 1 - playbooks/aws/openshift-master/terminate.yml | 2 - playbooks/aws/openshift-node/config.yml | 26 ---------- playbooks/aws/openshift-node/filter_plugins | 1 - playbooks/aws/openshift-node/launch.yml | 72 --------------------------- playbooks/aws/openshift-node/roles | 1 - playbooks/aws/openshift-node/terminate.yml | 2 - playbooks/aws/terminate.yml | 64 ------------------------ playbooks/gce/openshift-master/config.yml | 18 ------- playbooks/gce/openshift-master/filter_plugins | 1 - playbooks/gce/openshift-master/launch.yml | 51 ------------------- playbooks/gce/openshift-master/roles | 1 - playbooks/gce/openshift-master/terminate.yml | 35 ------------- playbooks/gce/openshift-node/config.yml | 25 
---------- playbooks/gce/openshift-node/filter_plugins | 1 - playbooks/gce/openshift-node/launch.yml | 51 ------------------- playbooks/gce/openshift-node/roles | 1 - playbooks/gce/openshift-node/terminate.yml | 35 ------------- 22 files changed, 50 insertions(+), 479 deletions(-) delete mode 100644 playbooks/aws/openshift-master/config.yml delete mode 120000 playbooks/aws/openshift-master/filter_plugins delete mode 100644 playbooks/aws/openshift-master/launch.yml delete mode 120000 playbooks/aws/openshift-master/roles delete mode 100644 playbooks/aws/openshift-master/terminate.yml delete mode 100644 playbooks/aws/openshift-node/config.yml delete mode 120000 playbooks/aws/openshift-node/filter_plugins delete mode 100644 playbooks/aws/openshift-node/launch.yml delete mode 120000 playbooks/aws/openshift-node/roles delete mode 100644 playbooks/aws/openshift-node/terminate.yml delete mode 100644 playbooks/aws/terminate.yml delete mode 100644 playbooks/gce/openshift-master/config.yml delete mode 120000 playbooks/gce/openshift-master/filter_plugins delete mode 100644 playbooks/gce/openshift-master/launch.yml delete mode 120000 playbooks/gce/openshift-master/roles delete mode 100644 playbooks/gce/openshift-master/terminate.yml delete mode 100644 playbooks/gce/openshift-node/config.yml delete mode 120000 playbooks/gce/openshift-node/filter_plugins delete mode 100644 playbooks/gce/openshift-node/launch.yml delete mode 120000 playbooks/gce/openshift-node/roles delete mode 100644 playbooks/gce/openshift-node/terminate.yml (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 617d0d456..361ab2d37 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -13,4 +13,53 @@ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[scratch_group] | default([]) | difference(['localhost']) -- include: ../terminate.yml +- name: Terminate instances + hosts: localhost + connection: local + gather_facts: no + vars: + host_vars: "{{ hostvars + | oo_select_keys(groups['oo_hosts_to_terminate']) }}" + tasks: + - name: Remove tags from instances + ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent + args: + tags: + env: "{{ item['ec2_tag_env'] }}" + host-type: "{{ item['ec2_tag_host-type'] }}" + env-host-type: "{{ item['ec2_tag_env-host-type'] }}" + with_items: host_vars + when: "'oo_hosts_to_terminate' in groups" + + - name: Terminate instances + ec2: + state: absent + instance_ids: ["{{ item.ec2_id }}"] + region: "{{ item.ec2_region }}" + ignore_errors: yes + register: ec2_term + with_items: host_vars + when: "'oo_hosts_to_terminate' in groups" + + # Fail if any of the instances failed to terminate with an error other + # than 403 Forbidden + - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} + when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + with_items: ec2_term.results + + - name: Stop instance if termination failed + ec2: + state: stopped + instance_ids: ["{{ item.item.ec2_id }}"] + region: "{{ item.item.ec2_region }}" + register: ec2_stop + when: "'oo_hosts_to_terminate' in groups and item.failed" + with_items: ec2_term.results + + - name: Rename stopped instances + ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present + args: + tags: + Name: "{{ 
item.item.item.ec2_tag_Name }}-terminate" + with_items: ec2_stop.results + when: "'oo_hosts_to_terminate' in groups" diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml deleted file mode 100644 index 37ab4fbe6..000000000 --- a/playbooks/aws/openshift-master/config.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Populate oo_masters_to_config host group - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: root - with_items: oo_host_group_exp | default([]) - -- include: ../../common/openshift-master/config.yml - vars: - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ ec2_private_ip_address }}" - openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-master/filter_plugins b/playbooks/aws/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/aws/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml deleted file mode 100644 index 1cefad492..000000000 --- a/playbooks/aws/openshift-master/launch.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - gather_facts: no - -# TODO: modify g_ami based on deployment_type - vars: - inst_region: us-east-1 - g_ami: ami-86781fee - user_data_file: user_data.txt - - tasks: - - name: Launch instances - ec2: - state: present - region: "{{ inst_region }}" - keypair: libra - group: ['public'] - instance_type: m3.large - image: "{{ g_ami }}" - count: "{{ oo_new_inst_names | length }}" - user_data: "{{ lookup('file', user_data_file) }}" - wait: yes - register: ec2 - - - name: Add new instances public IPs to the host group - add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances" - with_items: ec2.instances - - - name: Add Name and environment tags to instances - ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present" - with_together: - - oo_new_inst_names - - ec2.instances - args: - tags: - Name: "{{ item.0 }}" - - - name: Add other tags to instances - ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present - with_items: ec2.instances - args: - tags: "{{ oo_new_inst_tags }}" - - - name: Add new instances public IPs to oo_masters_to_config - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - groupname: oo_masters_to_config - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - with_together: - - oo_new_inst_names - - ec2.instances - - - name: Wait for ssh - wait_for: port=22 host={{ item.dns_name }} - with_items: ec2.instances - - - name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: ec2.instances - -# Apply the configs, seprate so that just the configs can be run by themselves -- include: config.yml diff --git a/playbooks/aws/openshift-master/roles b/playbooks/aws/openshift-master/roles deleted file mode 120000 index 20c4c58cf..000000000 --- 
a/playbooks/aws/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml deleted file mode 100644 index 07d9961bc..000000000 --- a/playbooks/aws/openshift-master/terminate.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: ../terminate.yml diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml deleted file mode 100644 index a993a1e99..000000000 --- a/playbooks/aws/openshift-node/config.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Populate oo_nodes_to_config and oo_first_master host groups - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: root - with_items: oo_host_group_exp | default([]) - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" - groups: oo_first_master - ansible_ssh_user: root - - -- include: ../../common/openshift-node/config.yml - vars: - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" - openshift_first_master: "{{ groups.oo_first_master.0 }}" - openshift_hostname: "{{ ec2_private_ip_address }}" - openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-node/filter_plugins b/playbooks/aws/openshift-node/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/aws/openshift-node/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml deleted file mode 100644 index e7d1f7310..000000000 --- a/playbooks/aws/openshift-node/launch.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - gather_facts: no - -# TODO: modify g_ami based on deployment_type - vars: - inst_region: us-east-1 - g_ami: ami-86781fee - user_data_file: user_data.txt - - tasks: - - name: Launch instances - ec2: - state: present - region: "{{ inst_region }}" - keypair: libra - group: ['public'] - instance_type: m3.large - image: "{{ g_ami }}" - count: "{{ oo_new_inst_names | length }}" - user_data: "{{ lookup('file', user_data_file) }}" - wait: yes - register: ec2 - - - name: Add new instances public IPs to the host group - add_host: - hostname: "{{ item.public_ip }}" - groupname: new_ec2_instances" - with_items: ec2.instances - - - name: Add Name and environment tags to instances - ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present - with_together: - - oo_new_inst_names - - ec2.instances - args: - tags: - Name: "{{ item.0 }}" - - - name: Add other tags to instances - ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present - with_items: ec2.instances - args: - tags: "{{ oo_new_inst_tags }}" - - - name: Add new instances public IPs to oo_nodes_to_config - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - groupname: oo_nodes_to_config - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - with_together: - - oo_new_inst_names - - ec2.instances - - - name: Wait for ssh - wait_for: port=22 host={{ item.dns_name }} - with_items: ec2.instances - - - name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o 
PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: ec2.instances - -# Apply the configs, seprate so that just the configs can be run by themselves -- include: config.yml diff --git a/playbooks/aws/openshift-node/roles b/playbooks/aws/openshift-node/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/aws/openshift-node/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml deleted file mode 100644 index 07d9961bc..000000000 --- a/playbooks/aws/openshift-node/terminate.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: ../terminate.yml diff --git a/playbooks/aws/terminate.yml b/playbooks/aws/terminate.yml deleted file mode 100644 index e9767b260..000000000 --- a/playbooks/aws/terminate.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -- name: Populate oo_hosts_to_terminate host group - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_hosts_to_terminate - add_host: name={{ item }} groups=oo_hosts_to_terminate - with_items: oo_host_group_exp | default([]) - -- name: Gather dynamic inventory variables for hosts to terminate - hosts: oo_hosts_to_terminate - gather_facts: no - -- name: Terminate instances - hosts: localhost - connection: local - gather_facts: no - vars: - host_vars: "{{ hostvars - | oo_select_keys(groups['oo_hosts_to_terminate']) }}" - tasks: - - name: Remove tags from instances - ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent - args: - tags: - env: "{{ item['ec2_tag_env'] }}" - host-type: "{{ item['ec2_tag_host-type'] }}" - env-host-type: "{{ item['ec2_tag_env-host-type'] }}" - with_items: host_vars - when: "'oo_hosts_to_terminate' in groups" - - - name: Terminate instances - ec2: - state: absent - instance_ids: ["{{ item.ec2_id }}"] - region: "{{ item.ec2_region }}" - ignore_errors: yes - register: ec2_term - with_items: host_vars - when: "'oo_hosts_to_terminate' in groups" - - # Fail if any of the instances failed to terminate with an error other - # than 403 Forbidden - - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" - with_items: ec2_term.results - - - name: Stop instance if termination failed - ec2: - state: stopped - instance_ids: ["{{ item.item.ec2_id }}"] - region: "{{ item.item.ec2_region }}" - register: ec2_stop - when: item.failed - with_items: ec2_term.results - when: "'oo_hosts_to_terminate' in groups" - - - name: Rename stopped instances - ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present - args: - tags: - Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: ec2_stop.results - when: "'oo_hosts_to_terminate' in groups" diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml deleted file mode 100644 index af6000bc8..000000000 --- a/playbooks/gce/openshift-master/config.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Populate oo_masters_to_config host group - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: root - with_items: 
oo_host_group_exp | default([]) - -- include: ../../common/openshift-master/config.yml - vars: - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-master/filter_plugins b/playbooks/gce/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/gce/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml deleted file mode 100644 index ef10b6cf0..000000000 --- a/playbooks/gce/openshift-master/launch.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# TODO: when we are ready to go to ansible 1.9+ support only, we can update to -# the gce task to use the disk_auto_delete parameter to avoid having to delete -# the disk as a separate step on termination - -- name: Launch instance(s) - hosts: localhost - connection: local - gather_facts: no - -# TODO: modify image based on deployment_type - vars: - inst_names: "{{ oo_new_inst_names }}" - machine_type: n1-standard-1 - image: libra-rhel7 - - tasks: - - name: Launch instances - gce: - instance_names: "{{ inst_names }}" - machine_type: "{{ machine_type }}" - image: "{{ image }}" - service_account_email: "{{ gce_service_account_email }}" - pem_file: "{{ gce_pem_file }}" - project_id: "{{ gce_project_id }}" - tags: "{{ oo_new_inst_tags }}" - register: gce - - - name: Add new instances public IPs to oo_masters_to_config - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" - groupname: oo_masters_to_config - gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data - - - name: Wait for ssh - wait_for: port=22 host={{ item.public_ip }} - with_items: gce.instance_data - - - name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: gce.instance_data - - -# Apply the configs, separate so that just the configs can be run by themselves -- include: config.yml diff --git a/playbooks/gce/openshift-master/roles b/playbooks/gce/openshift-master/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/gce/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml deleted file mode 100644 index 452ac5199..000000000 --- a/playbooks/gce/openshift-master/terminate.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Populate oo_masters_to_terminate host group if needed - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_masters_to_terminate - add_host: name={{ item }} groups=oo_masters_to_terminate - with_items: oo_host_group_exp | default([]) - -- name: Terminate master instances - hosts: localhost - connection: local - gather_facts: no - tasks: - - name: Terminate master instances - gce: - service_account_email: "{{ gce_service_account_email }}" - pem_file: "{{ gce_pem_file }}" - project_id: "{{ gce_project_id }}" - state: 'absent' - instance_names: "{{ groups['oo_masters_to_terminate'] }}" - disks: "{{ groups['oo_masters_to_terminate'] }}" - register: gce - when: "'oo_masters_to_terminate' in 
groups" - - - name: Remove disks of instances - gce_pd: - service_account_email: "{{ gce_service_account_email }}" - pem_file: "{{ gce_pem_file }}" - project_id: "{{ gce_project_id }}" - name: "{{ item }}" - zone: "{{ gce.zone }}" - state: absent - with_items: gce.instance_names - when: "'oo_masters_to_terminate' in groups" diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml deleted file mode 100644 index 54b0da2ca..000000000 --- a/playbooks/gce/openshift-node/config.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Populate oo_nodes_to_config and oo_first_master host groups - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: root - with_items: oo_host_group_exp | default([]) - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" - groups: oo_first_master - ansible_ssh_user: root - - -- include: ../../common/openshift-node/config.yml - vars: - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" - openshift_first_master: "{{ groups.oo_first_master.0 }}" - openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-node/filter_plugins b/playbooks/gce/openshift-node/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/gce/openshift-node/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml deleted file mode 100644 index 086ba58bc..000000000 --- a/playbooks/gce/openshift-node/launch.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# TODO: when we are ready to go to ansible 1.9+ support only, we can update to -# the gce task to use the disk_auto_delete parameter to avoid having to delete -# the disk as a separate step on termination - -- name: Launch instance(s) - hosts: localhost - connection: local - gather_facts: no - -# TODO: modify image based on deployment_type - vars: - inst_names: "{{ oo_new_inst_names }}" - machine_type: n1-standard-1 - image: libra-rhel7 - - tasks: - - name: Launch instances - gce: - instance_names: "{{ inst_names }}" - machine_type: "{{ machine_type }}" - image: "{{ image }}" - service_account_email: "{{ gce_service_account_email }}" - pem_file: "{{ gce_pem_file }}" - project_id: "{{ gce_project_id }}" - tags: "{{ oo_new_inst_tags }}" - register: gce - - - name: Add new instances public IPs to oo_nodes_to_config - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" - groupname: oo_nodes_to_config - gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data - - - name: Wait for ssh - wait_for: port=22 host={{ item.public_ip }} - with_items: gce.instance_data - - - name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: gce.instance_data - - -# Apply the configs, separate so that just the configs can be run by themselves -- include: config.yml diff --git a/playbooks/gce/openshift-node/roles b/playbooks/gce/openshift-node/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/gce/openshift-node/roles +++ 
/dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml deleted file mode 100644 index 357e0c295..000000000 --- a/playbooks/gce/openshift-node/terminate.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Populate oo_nodes_to_terminate host group if needed - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_nodes_to_terminate - add_host: name={{ item }} groups=oo_nodes_to_terminate - with_items: oo_host_group_exp | default([]) - -- name: Terminate node instances - hosts: localhost - connection: local - gather_facts: no - tasks: - - name: Terminate node instances - gce: - service_account_email: "{{ gce_service_account_email }}" - pem_file: "{{ gce_pem_file }}" - project_id: "{{ gce_project_id }}" - state: 'absent' - instance_names: "{{ groups['oo_nodes_to_terminate'] }}" - disks: "{{ groups['oo_nodes_to_terminate'] }}" - register: gce - when: "'oo_nodes_to_terminate' in groups" - - - name: Remove disks of instances - gce_pd: - service_account_email: "{{ gce_service_account_email }}" - pem_file: "{{ gce_pem_file }}" - project_id: "{{ gce_project_id }}" - name: "{{ item }}" - zone: "{{ gce.zone }}" - state: absent - with_items: gce.instance_names - when: "'oo_nodes_to_terminate' in groups" -- cgit v1.2.3 From e7082b9870bdf4cc0769645f4fae3bccc3efdee4 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Fri, 12 Jun 2015 14:52:03 -0400 Subject: Add etcd role that builds out basic etcd cluster - Add initial etcd role - Add etcd playbook to create etcd client certs - Hookup master to etcd --- playbooks/byo/config.yml | 4 + playbooks/byo/etcd/config.yml | 7 ++ playbooks/byo/etcd/filter_plugins | 1 + playbooks/byo/etcd/roles | 1 + playbooks/byo/openshift-etcd/config.yml | 20 +++++ playbooks/byo/openshift-etcd/filter_plugins | 1 + playbooks/byo/openshift-etcd/roles | 1 + playbooks/common/openshift-etcd/config.yml | 106 +++++++++++++++++++++++ playbooks/common/openshift-etcd/filter_plugins | 1 + playbooks/common/openshift-etcd/roles | 1 + roles/etcd/README.md | 39 +++++++++ roles/etcd/defaults/main.yaml | 28 ++++++ roles/etcd/handlers/main.yml | 3 + roles/etcd/meta/main.yml | 17 ++++ roles/etcd/tasks/main.yml | 16 ++++ roles/etcd/templates/etcd.conf.j2 | 46 ++++++++++ roles/openshift_etcd_certs/README.md | 34 ++++++++ roles/openshift_etcd_certs/meta/main.yml | 16 ++++ roles/openshift_etcd_certs/tasks/main.yml | 33 +++++++ roles/openshift_etcd_certs/vars/main.yml | 8 ++ roles/openshift_facts/library/openshift_facts.py | 17 +++- roles/openshift_master/tasks/main.yml | 1 + 22 files changed, 398 insertions(+), 3 deletions(-) create mode 100644 playbooks/byo/etcd/config.yml create mode 120000 playbooks/byo/etcd/filter_plugins create mode 120000 playbooks/byo/etcd/roles create mode 100644 playbooks/byo/openshift-etcd/config.yml create mode 120000 playbooks/byo/openshift-etcd/filter_plugins create mode 120000 playbooks/byo/openshift-etcd/roles create mode 100644 playbooks/common/openshift-etcd/config.yml create mode 120000 playbooks/common/openshift-etcd/filter_plugins create mode 120000 playbooks/common/openshift-etcd/roles create mode 100644 roles/etcd/README.md create mode 100644 roles/etcd/defaults/main.yaml create mode 100644 roles/etcd/handlers/main.yml create mode 100644 roles/etcd/meta/main.yml create mode 100644 roles/etcd/tasks/main.yml create mode 100644 roles/etcd/templates/etcd.conf.j2 create mode 100644 roles/openshift_etcd_certs/README.md create mode 100644 
roles/openshift_etcd_certs/meta/main.yml create mode 100644 roles/openshift_etcd_certs/tasks/main.yml create mode 100644 roles/openshift_etcd_certs/vars/main.yml (limited to 'playbooks') diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml index e059514db..092eb9978 100644 --- a/playbooks/byo/config.yml +++ b/playbooks/byo/config.yml @@ -3,6 +3,10 @@ include: openshift-master/config.yml when: groups.masters is defined and groups.masters +- name: Run the openshift-etcd playbook + include: openshift-etcd/config.yml + when: groups.etcd is defined and groups.etcd + - name: Run the openshift-node config playbook include: openshift-node/config.yml when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters diff --git a/playbooks/byo/etcd/config.yml b/playbooks/byo/etcd/config.yml new file mode 100644 index 000000000..0c96b2541 --- /dev/null +++ b/playbooks/byo/etcd/config.yml @@ -0,0 +1,7 @@ +## deploys a simple etcd cluster, this cluster does not provide client side ssl +## and cannot be used directly for openshift. This should only be used for testing. +--- +- name: Configure etcd + hosts: etcd + roles: + - etcd diff --git a/playbooks/byo/etcd/filter_plugins b/playbooks/byo/etcd/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/byo/etcd/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/ \ No newline at end of file diff --git a/playbooks/byo/etcd/roles b/playbooks/byo/etcd/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/byo/etcd/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml new file mode 100644 index 000000000..381f139de --- /dev/null +++ b/playbooks/byo/openshift-etcd/config.yml @@ -0,0 +1,20 @@ +--- +- name: Populate oo_etcd_hosts_to_config and oo_first_master host groups + hosts: localhost + gather_facts: no + tasks: + - name: Evaluate oo_etcd_hosts_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_config + with_items: groups.etcd + - name: Evaluate oo_first_master + add_host: + name: "{{ item }}" + groups: oo_first_master + with_items: groups.masters.0 + + +- include: ../../common/openshift-etcd/config.yml + vars: + openshift_first_master: "{{ groups.masters.0 }}" diff --git a/playbooks/byo/openshift-etcd/filter_plugins b/playbooks/byo/openshift-etcd/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-etcd/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-etcd/roles b/playbooks/byo/openshift-etcd/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-etcd/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml new file mode 100644 index 000000000..2c920df49 --- /dev/null +++ b/playbooks/common/openshift-etcd/config.yml @@ -0,0 +1,106 @@ +--- +- name: Gather and set facts for etcd hosts + hosts: oo_etcd_hosts_to_config + roles: + - openshift_facts + tasks: + - openshift_facts: + role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" + - name: Check for etcd certificates + stat: + path: "{{ item }}" + with_items: + - "/etc/etcd/ca.crt" + - "/etc/etcd/client.crt" + - "/etc/etcd/client.key" + - "/etc/etcd/peer-ca.crt" + - 
"/etc/etcd/peer.crt" + - "/etc/etcd/peer.key" + register: g_etcd_certs_stat + - set_fact: + etcd_certs_missing: "{{ g_etcd_certs_stat.results | map(attribute='stat.exists') + | list | intersect([false])}}" + etcd_subdir: etcd-{{ openshift.common.hostname }} + etcd_dir: /etc/openshift/generated-configs/etcd-{{ openshift.common.hostname }} + etcd_cert_dir: /etc/etcd + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + sudo: false + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_etcd_mktemp + changed_when: False + +- name: Create etcd certs + hosts: oo_first_master + vars: + etcd_hosts_needing_certs: "{{ hostvars + | oo_select_keys(groups['oo_etcd_hosts_to_config']) + | oo_filter_list(filter_attr='etcd_certs_missing') }}" + etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_hosts_to_config']) }}" + sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" + roles: + - openshift_etcd_certs + post_tasks: + - name: Create a tarball of the etcd certs + command: > + tar -czvf {{ item.etcd_dir }}.tgz + -C {{ item.etcd_dir }} . + args: + creates: "{{ item.etcd_dir }}.tgz" + with_items: etcd_hosts_needing_certs + + - name: Retrieve the etcd cert tarballs from the master + fetch: + src: "{{ item.etcd_dir }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: etcd_hosts_needing_certs + +- name: Deploy etcd + hosts: oo_etcd_hosts_to_config + vars: + sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" + etcd_url_scheme: https + pre_tasks: + - name: Ensure certificate directory exists + file: + path: "{{ etcd_cert_dir }}" + state: directory + - name: Unarchive the tarball on the node + unarchive: + src: "{{ sync_tmpdir }}/{{ etcd_subdir }}.tgz" + dest: "{{ etcd_cert_dir }}" + when: etcd_certs_missing + - file: path=/etc/etcd/client.crt mode=0600 owner=etcd group=etcd + - file: path=/etc/etcd/client.key mode=0600 owner=etcd group=etcd + - file: path=/etc/etcd/ca.crt mode=0644 owner=etcd group=etcd + roles: + - etcd + +- name: Delete the temporary directory on the master + hosts: oo_first_master + gather_facts: no + vars: + sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" + tasks: + - file: name={{ sync_tmpdir }} state=absent + changed_when: False + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + sudo: false + gather_facts: no + tasks: + - file: name={{ g_etcd_mktemp.stdout }} state=absent + changed_when: False diff --git a/playbooks/common/openshift-etcd/filter_plugins b/playbooks/common/openshift-etcd/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-etcd/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-etcd/roles b/playbooks/common/openshift-etcd/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-etcd/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/etcd/README.md b/roles/etcd/README.md new file mode 100644 index 000000000..49207c428 --- /dev/null +++ b/roles/etcd/README.md @@ -0,0 +1,39 @@ +Role Name +========= + +Configures an etcd cluster for an arbitrary number of hosts + +Requirements +------------ + +This role assumes it's being deployed on a RHEL/Fedora based host with package +named 'etcd' available via 
yum. + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +None + +Example Playbook +---------------- + + - hosts: etcd + roles: + - { etcd } + +License +------- + +MIT + +Author Information +------------------ + +Scott Dodson +Adapted from https://github.com/retr0h/ansible-etcd for use on RHEL/Fedora. We +should at some point submit a PR to merge this with that module. diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml new file mode 100644 index 000000000..f6281101f --- /dev/null +++ b/roles/etcd/defaults/main.yaml @@ -0,0 +1,28 @@ +--- +etcd_interface: eth0 +etcd_client_port: 2379 +etcd_peer_port: 2380 +etcd_peers_group: etcd +etcd_url_scheme: http +etcd_peer_url_scheme: http +etcd_ca_file: /etc/etcd/ca.crt +etcd_cert_file: /etc/etcd/client.crt +etcd_key_file: /etc/etcd/client.key +etcd_peer_ca_file: /etc/etcd/ca.crt +etcd_peer_cert_file: /etc/etcd/peer.crt +etcd_peer_key_file: /etc/etcd/peer.key + +etcd_initial_cluster_state: new +etcd_initial_cluster_token: etcd-cluster-1 + +etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}" +etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}" +etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}" +etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}" + +etcd_data_dir: /var/lib/etcd/ +os_firewall_allow: +- service: etcd + port: "{{etcd_client_port}}/tcp" +- service: etcd peering + port: "{{ etcd_peer_port }}/tcp" diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml new file mode 100644 index 000000000..b897913f9 --- /dev/null +++ b/roles/etcd/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart etcd + service: name=etcd state=restarted diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml new file mode 100644 index 000000000..f952f84be --- /dev/null +++ b/roles/etcd/meta/main.yml @@ -0,0 +1,17 @@ +--- +# This module is based on https://github.com/retr0h/ansible-etcd with most +# changes centered around installing from a pre-existing rpm +# TODO: Extend https://github.com/retr0h/ansible-etcd rather than forking +galaxy_info: + author: Scott Dodson + description: etcd management + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml new file mode 100644 index 000000000..8ed803119 --- /dev/null +++ b/roles/etcd/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Install etcd + yum: pkg=etcd state=present disable_gpg_check=yes + +- name: Write etcd global config file + template: + src: etcd.conf.j2 + dest: /etc/etcd/etcd.conf + notify: + - restart etcd + +- name: Enable etcd + service: + name: etcd + state: started + enabled: yes diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 new file mode 100644 index 000000000..5723b5089 --- /dev/null +++ b/roles/etcd/templates/etcd.conf.j2 @@ -0,0 +1,46 @@ +{% macro initial_cluster() -%} +{% for host in groups[etcd_peers_group] -%} +{% if loop.last -%} +{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }} +{%- else -%} +{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}, +{%- endif -%} +{% endfor -%} +{% endmacro -%} + +ETCD_NAME={{ inventory_hostname }} +ETCD_DATA_DIR={{ etcd_data_dir }} +#ETCD_SNAPSHOT_COUNTER="10000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} +ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +#ETCD_CORS="" +# +#[cluster] +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} +ETCD_INITIAL_CLUSTER={{ initial_cluster() }} +ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} +ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_SRV="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +# +#[proxy] +#ETCD_PROXY="off" +# +#[security] +{% if etcd_url_scheme == 'https' -%} +ETCD_CA_FILE={{ etcd_ca_file }} +ETCD_CERT_FILE={{ etcd_cert_file }} +ETCD_KEY_FILE={{ etcd_key_file }} +{% endif -%} +{% if etcd_peer_url_scheme == 'https' -%} +ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }} +ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }} +ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }} +{% endif -%} diff --git a/roles/openshift_etcd_certs/README.md b/roles/openshift_etcd_certs/README.md new file mode 100644 index 000000000..efac6d9fe --- /dev/null +++ b/roles/openshift_etcd_certs/README.md @@ -0,0 +1,34 @@ +OpenShift etcd certs +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information +------------------ + +Scott Dodson (sdodson@redhat.com) diff --git a/roles/openshift_etcd_certs/meta/main.yml b/roles/openshift_etcd_certs/meta/main.yml new file mode 100644 index 000000000..4847ba94b --- /dev/null +++ b/roles/openshift_etcd_certs/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Scott Dodson + description: + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_etcd_certs/tasks/main.yml b/roles/openshift_etcd_certs/tasks/main.yml new file mode 100644 index 000000000..04b411117 --- /dev/null +++ b/roles/openshift_etcd_certs/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: Create openshift_generated_configs_dir if it doesn't exist + file: + path: "{{ openshift_generated_configs_dir }}" + state: directory + +- name: Create openshift_generated_configs_dir for each etcd host + file: + path: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname}}" + state: directory + with_items: etcd_hosts_needing_certs + +- name: Generate the etcd client side certs + delegate_to: "{{ openshift_first_master }}" + command: > + {{ openshift.common.admin_binary }} create-server-cert + --cert=client.crt --key=client.key --overwrite=true + --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname, item.openshift.common.ip]|unique|join(",") }} + --signer-cert={{ openshift_master_ca_cert }} + --signer-key={{ openshift_master_ca_key }} + --signer-serial={{ openshift_master_ca_serial }} + args: + chdir: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}" + creates: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}/client.crt" + with_items: etcd_hosts_needing_certs + +- name: Copy CA cert + delegate_to: "{{ openshift_first_master }}" + command: "cp {{ openshift_master_ca_cert }} ." + args: + chdir: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}" + creates: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}/ca.crt" + with_items: etcd_hosts_needing_certs diff --git a/roles/openshift_etcd_certs/vars/main.yml b/roles/openshift_etcd_certs/vars/main.yml new file mode 100644 index 000000000..3801b8427 --- /dev/null +++ b/roles/openshift_etcd_certs/vars/main.yml @@ -0,0 +1,8 @@ +--- +openshift_node_config_dir: /etc/openshift/node +openshift_master_config_dir: /etc/openshift/master +openshift_generated_configs_dir: /etc/openshift/generated-configs +openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" +openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" +openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" +openshift_kube_api_version: v1beta3 diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index ca5ea1da0..e9a2ceffb 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -366,13 +366,24 @@ def set_url_facts_if_unset(facts): console_port = facts['master']['console_port'] console_path = facts['master']['console_path'] etcd_use_ssl = facts['master']['etcd_use_ssl'] + etcd_hosts = facts['master']['etcd_hosts'] etcd_port = facts['master']['etcd_port'], hostname = facts['common']['hostname'] public_hostname = facts['common']['public_hostname'] if 'etcd_urls' not in facts['master']: - facts['master']['etcd_urls'] = [format_url(etcd_use_ssl, hostname, - etcd_port)] + etcd_urls = [] + if etcd_hosts != '': + etcd_port = 2379 + facts['master']['etcd_port'] = etcd_port + facts['master']['embedded_etcd'] = False + for host in etcd_hosts: + etcd_urls.append(format_url(etcd_use_ssl, host, + etcd_port)) + else: + etcd_urls = 
[format_url(etcd_use_ssl, hostname, + etcd_port)] + facts['master']['etcd_urls'] = etcd_urls if 'api_url' not in facts['master']: facts['master']['api_url'] = format_url(api_use_ssl, hostname, api_port) @@ -695,7 +706,7 @@ class OpenShiftFacts(object): if 'master' in roles: master = dict(api_use_ssl=True, api_port='8443', console_use_ssl=True, console_path='/console', - console_port='8443', etcd_use_ssl=True, + console_port='8443', etcd_use_ssl=True, etcd_hosts='', etcd_port='4001', portal_net='172.30.0.0/16', embedded_etcd=True, embedded_kube=True, embedded_dns=True, dns_port='53', diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index da0a663ec..f6bd2bf2e 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -31,6 +31,7 @@ console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + etcd_hosts: "{{ groups['etcd'] | default(None)}}" etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}" -- cgit v1.2.3 From add3fbcce31e9db4ea8c76acb9c8579f20581912 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 10 Jul 2015 14:46:43 -0400 Subject: Etcd role updates and playbook updates - fix firewall conflict issues with co-located etcd and openshift hosts - added os_firewall dependency to etcd role - updated etcd template to better handle clustered and non-clustered installs - added etcd_ca role - generates a self-signed cert to manage etcd certificates, since etcd peer certificates are required to be client and server certs and the openshift ca will only generate client or server certs (not one authorized for both). 
- renamed openshift_etcd_certs role to etcd_certificates and updated it to manage certificates generated from the CA managed by the etcd_ca role - remove hard coded etcd_port in openshift_facts - updates for the openshift-etcd common playbook - removed etcd and openshift-etcd playbooks from the byo playbooks directory - added a common playbook for setting etcd launch facts - added an openshift-etcd common service playbook - removed unused variables - fixed tests for embedded_{etcd,dns,kube} in openshift_master - removed old workaround for reloading systemd units --- playbooks/byo/etcd/config.yml | 7 -- playbooks/byo/etcd/filter_plugins | 1 - playbooks/byo/etcd/roles | 1 - playbooks/byo/openshift-etcd/config.yml | 20 ----- playbooks/byo/openshift-etcd/filter_plugins | 1 - playbooks/byo/openshift-etcd/roles | 1 - .../set_etcd_launch_facts_tasks.yml | 13 +++ playbooks/common/openshift-etcd/config.yml | 96 ++++++++++------------ playbooks/common/openshift-etcd/lookup_plugins | 1 + playbooks/common/openshift-etcd/service.yml | 18 ++++ roles/etcd/defaults/main.yaml | 15 ++-- roles/etcd/meta/main.yml | 2 + roles/etcd/tasks/main.yml | 36 +++++++- roles/etcd/templates/etcd.conf.j2 | 16 ++-- roles/etcd_ca/README.md | 34 ++++++++ roles/etcd_ca/meta/main.yml | 16 ++++ roles/etcd_ca/tasks/main.yml | 44 ++++++++++ roles/etcd_ca/templates/openssl_append.j2 | 51 ++++++++++++ roles/etcd_ca/vars/main.yml | 3 + roles/etcd_certificates/README.md | 34 ++++++++ roles/etcd_certificates/meta/main.yml | 16 ++++ roles/etcd_certificates/tasks/client.yml | 42 ++++++++++ roles/etcd_certificates/tasks/main.yml | 9 ++ roles/etcd_certificates/tasks/server.yml | 73 ++++++++++++++++ roles/etcd_certificates/vars/main.yml | 11 +++ roles/openshift_etcd_certs/README.md | 34 -------- roles/openshift_etcd_certs/meta/main.yml | 16 ---- roles/openshift_etcd_certs/tasks/main.yml | 33 -------- roles/openshift_etcd_certs/vars/main.yml | 8 -- roles/openshift_facts/library/openshift_facts.py | 7 +- roles/openshift_master/tasks/main.yml | 5 -- roles/openshift_master/templates/master.yaml.v1.j2 | 8 +- roles/openshift_node/tasks/main.yml | 8 -- roles/openshift_node_certificates/vars/main.yml | 1 - 34 files changed, 470 insertions(+), 211 deletions(-) delete mode 100644 playbooks/byo/etcd/config.yml delete mode 120000 playbooks/byo/etcd/filter_plugins delete mode 120000 playbooks/byo/etcd/roles delete mode 100644 playbooks/byo/openshift-etcd/config.yml delete mode 120000 playbooks/byo/openshift-etcd/filter_plugins delete mode 120000 playbooks/byo/openshift-etcd/roles create mode 100644 playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml create mode 120000 playbooks/common/openshift-etcd/lookup_plugins create mode 100644 playbooks/common/openshift-etcd/service.yml create mode 100644 roles/etcd_ca/README.md create mode 100644 roles/etcd_ca/meta/main.yml create mode 100644 roles/etcd_ca/tasks/main.yml create mode 100644 roles/etcd_ca/templates/openssl_append.j2 create mode 100644 roles/etcd_ca/vars/main.yml create mode 100644 roles/etcd_certificates/README.md create mode 100644 roles/etcd_certificates/meta/main.yml create mode 100644 roles/etcd_certificates/tasks/client.yml create mode 100644 roles/etcd_certificates/tasks/main.yml create mode 100644 roles/etcd_certificates/tasks/server.yml create mode 100644 roles/etcd_certificates/vars/main.yml delete mode 100644 roles/openshift_etcd_certs/README.md delete mode 100644 roles/openshift_etcd_certs/meta/main.yml delete mode 100644 roles/openshift_etcd_certs/tasks/main.yml delete 
mode 100644 roles/openshift_etcd_certs/vars/main.yml (limited to 'playbooks') diff --git a/playbooks/byo/etcd/config.yml b/playbooks/byo/etcd/config.yml deleted file mode 100644 index 0c96b2541..000000000 --- a/playbooks/byo/etcd/config.yml +++ /dev/null @@ -1,7 +0,0 @@ -## deploys a simple etcd cluster, this cluster does not provide client side ssl -## and cannot be used directly for openshift. This should only be used for testing. ---- -- name: Configure etcd - hosts: etcd - roles: - - etcd diff --git a/playbooks/byo/etcd/filter_plugins b/playbooks/byo/etcd/filter_plugins deleted file mode 120000 index b0b7a3414..000000000 --- a/playbooks/byo/etcd/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins/ \ No newline at end of file diff --git a/playbooks/byo/etcd/roles b/playbooks/byo/etcd/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/byo/etcd/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml deleted file mode 100644 index 381f139de..000000000 --- a/playbooks/byo/openshift-etcd/config.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Populate oo_etcd_hosts_to_config and oo_first_master host groups - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_etcd_hosts_to_config - add_host: - name: "{{ item }}" - groups: oo_etcd_hosts_to_config - with_items: groups.etcd - - name: Evaluate oo_first_master - add_host: - name: "{{ item }}" - groups: oo_first_master - with_items: groups.masters.0 - - -- include: ../../common/openshift-etcd/config.yml - vars: - openshift_first_master: "{{ groups.masters.0 }}" diff --git a/playbooks/byo/openshift-etcd/filter_plugins b/playbooks/byo/openshift-etcd/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/byo/openshift-etcd/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-etcd/roles b/playbooks/byo/openshift-etcd/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/byo/openshift-etcd/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml new file mode 100644 index 000000000..1a6580795 --- /dev/null +++ b/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml @@ -0,0 +1,13 @@ +--- +- set_fact: k8s_type="etcd" + +- name: Generate etcd instance names(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: etcd_names_output + with_sequence: count={{ num_etcd }} + +- set_fact: + etcd_names: "{{ etcd_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 2c920df49..3cc561ba0 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,30 +1,32 @@ --- -- name: Gather and set facts for etcd hosts - hosts: oo_etcd_hosts_to_config +- name: Set etcd facts needed for generating certs + hosts: oo_etcd_to_config roles: - openshift_facts tasks: - openshift_facts: - role: common - local_facts: - hostname: "{{ openshift_hostname | default(None) }}" - - name: Check for etcd certificates + role: "{{ item.role }}" + local_facts: 
"{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + deployment_type: "{{ openshift_deployment_type }}" + - name: Check status of etcd certificates stat: path: "{{ item }}" with_items: - - "/etc/etcd/ca.crt" - - "/etc/etcd/client.crt" - - "/etc/etcd/client.key" - - "/etc/etcd/peer-ca.crt" - - "/etc/etcd/peer.crt" - - "/etc/etcd/peer.key" - register: g_etcd_certs_stat + - /etc/etcd/server.crt + - /etc/etcd/peer.crt + - /etc/etcd/ca.crt + register: g_etcd_server_cert_stat_result - set_fact: - etcd_certs_missing: "{{ g_etcd_certs_stat.results | map(attribute='stat.exists') - | list | intersect([false])}}" - etcd_subdir: etcd-{{ openshift.common.hostname }} - etcd_dir: /etc/openshift/generated-configs/etcd-{{ openshift.common.hostname }} - etcd_cert_dir: /etc/etcd + etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | map(attribute='stat.exists') + | list | intersect([false])}}" + etcd_cert_subdir: etcd-{{ openshift.common.hostname }} + etcd_cert_config_dir: /etc/etcd + etcd_cert_prefix: - name: Create temp directory for syncing certs hosts: localhost @@ -37,65 +39,53 @@ register: g_etcd_mktemp changed_when: False -- name: Create etcd certs - hosts: oo_first_master +- name: Configure etcd certificates + hosts: oo_first_etcd vars: - etcd_hosts_needing_certs: "{{ hostvars - | oo_select_keys(groups['oo_etcd_hosts_to_config']) - | oo_filter_list(filter_attr='etcd_certs_missing') }}" - etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_hosts_to_config']) }}" + etcd_generated_certs_dir: /etc/etcd/generated_certs + etcd_needing_server_certs: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config']) + | oo_filter_list(filter_attr='etcd_server_certs_missing') }}" sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" roles: - - openshift_etcd_certs + - etcd_certificates post_tasks: - name: Create a tarball of the etcd certs command: > - tar -czvf {{ item.etcd_dir }}.tgz - -C {{ item.etcd_dir }} . + tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz + -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} . 
args: - creates: "{{ item.etcd_dir }}.tgz" - with_items: etcd_hosts_needing_certs - - - name: Retrieve the etcd cert tarballs from the master + creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" + with_items: etcd_needing_server_certs + - name: Retrieve the etcd cert tarballs fetch: - src: "{{ item.etcd_dir }}.tgz" + src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" dest: "{{ sync_tmpdir }}/" flat: yes fail_on_missing: yes validate_checksum: yes - with_items: etcd_hosts_needing_certs + with_items: etcd_needing_server_certs -- name: Deploy etcd - hosts: oo_etcd_hosts_to_config +- name: Configure etcd hosts + hosts: oo_etcd_to_config vars: sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" etcd_url_scheme: https + etcd_peer_url_scheme: https + etcd_peers_group: oo_etcd_to_config pre_tasks: - name: Ensure certificate directory exists file: - path: "{{ etcd_cert_dir }}" + path: "{{ etcd_cert_config_dir }}" state: directory - - name: Unarchive the tarball on the node + - name: Unarchive the tarball on the etcd host unarchive: - src: "{{ sync_tmpdir }}/{{ etcd_subdir }}.tgz" - dest: "{{ etcd_cert_dir }}" - when: etcd_certs_missing - - file: path=/etc/etcd/client.crt mode=0600 owner=etcd group=etcd - - file: path=/etc/etcd/client.key mode=0600 owner=etcd group=etcd - - file: path=/etc/etcd/ca.crt mode=0644 owner=etcd group=etcd + src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz" + dest: "{{ etcd_cert_config_dir }}" + when: etcd_server_certs_missing roles: - etcd -- name: Delete the temporary directory on the master - hosts: oo_first_master - gather_facts: no - vars: - sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" - tasks: - - file: name={{ sync_tmpdir }} state=absent - changed_when: False - - name: Delete temporary directory on localhost hosts: localhost connection: local diff --git a/playbooks/common/openshift-etcd/lookup_plugins b/playbooks/common/openshift-etcd/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-etcd/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml new file mode 100644 index 000000000..0bf69b22f --- /dev/null +++ b/playbooks/common/openshift-etcd/service.yml @@ -0,0 +1,18 @@ +--- +- name: Populate g_service_masters host group if needed + hosts: localhost + gather_facts: no + tasks: + - fail: msg="new_cluster_state is required to be injected in this playbook" + when: new_cluster_state is not defined + + - name: Evaluate g_service_etcd + add_host: name={{ item }} groups=g_service_etcd + with_items: oo_host_group_exp | default([]) + +- name: Change etcd state on etcd instance(s) + hosts: g_service_etcd + connection: ssh + gather_facts: no + tasks: + - service: name=etcd state="{{ new_cluster_state }}" diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index f6281101f..0fb45f37c 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -5,12 +5,13 @@ etcd_peer_port: 2380 etcd_peers_group: etcd etcd_url_scheme: http etcd_peer_url_scheme: http -etcd_ca_file: /etc/etcd/ca.crt -etcd_cert_file: /etc/etcd/client.crt -etcd_key_file: /etc/etcd/client.key -etcd_peer_ca_file: /etc/etcd/ca.crt -etcd_peer_cert_file: /etc/etcd/peer.crt -etcd_peer_key_file: /etc/etcd/peer.key +etcd_conf_dir: /etc/etcd +etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_cert_file: "{{ etcd_conf_dir 
}}/server.crt" +etcd_key_file: "{{ etcd_conf_dir }}/server.key" +etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" +etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" etcd_initial_cluster_state: new etcd_initial_cluster_token: etcd-cluster-1 @@ -21,6 +22,8 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostn etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}" etcd_data_dir: /var/lib/etcd/ + +os_firewall_use_firewalld: False os_firewall_allow: - service: etcd port: "{{etcd_client_port}}/tcp" diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index f952f84be..82b1a62b8 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -15,3 +15,5 @@ galaxy_info: categories: - cloud - system +dependencies: +- { role: os_firewall } diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 8ed803119..62e29324c 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,6 +1,38 @@ --- - name: Install etcd - yum: pkg=etcd state=present disable_gpg_check=yes + yum: pkg=etcd state=present + +- name: Validate permissions on the config dir + file: + path: "{{ etcd_conf_dir }}" + state: directory + owner: etcd + group: etcd + mode: 0700 + +- name: Validate permissions on certificate files + file: + path: "{{ item }}" + mode: 0600 + group: etcd + owner: etcd + when: etcd_url_scheme == 'https' + with_items: + - "{{ etcd_ca_file }}" + - "{{ etcd_cert_file }}" + - "{{ etcd_key_file }}" + +- name: Validate permissions on peer certificate files + file: + path: "{{ item }}" + mode: 0600 + group: etcd + owner: etcd + when: etcd_peer_url_scheme == 'https' + with_items: + - "{{ etcd_peer_ca_file }}" + - "{{ etcd_peer_cert_file }}" + - "{{ etcd_peer_key_file }}" - name: Write etcd global config file template: @@ -14,3 +46,5 @@ name: etcd state: started enabled: yes + +- pause: seconds=10 diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 5723b5089..801be2c97 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -8,31 +8,37 @@ {% endfor -%} {% endmacro -%} +{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %} ETCD_NAME={{ inventory_hostname }} +ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} +{% else %} +ETCD_NAME=default +{% endif %} ETCD_DATA_DIR={{ etcd_data_dir }} #ETCD_SNAPSHOT_COUNTER="10000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" -ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" #ETCD_CORS="" -# + +{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %} #[cluster] ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} ETCD_INITIAL_CLUSTER={{ initial_cluster() }} ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} -ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} #ETCD_DISCOVERY="" #ETCD_DISCOVERY_SRV="" #ETCD_DISCOVERY_FALLBACK="proxy" #ETCD_DISCOVERY_PROXY="" -# +{% endif %} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} + #[proxy] #ETCD_PROXY="off" -# + #[security] {% if etcd_url_scheme == 'https' -%} ETCD_CA_FILE={{ etcd_ca_file }} diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md new file mode 100644 index 
000000000..60a880e30 --- /dev/null +++ b/roles/etcd_ca/README.md @@ -0,0 +1,34 @@ +etcd_ca +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information +------------------ + +Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_ca/meta/main.yml b/roles/etcd_ca/meta/main.yml new file mode 100644 index 000000000..ce909b992 --- /dev/null +++ b/roles/etcd_ca/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml new file mode 100644 index 000000000..ab151fe5b --- /dev/null +++ b/roles/etcd_ca/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- file: + path: "{{ etcd_ca_dir }}/{{ item }}" + state: directory + mode: 0700 + owner: root + group: root + with_items: + - certs + - crl + - fragments + +- command: cp /etc/pki/tls/openssl.cnf ./ + args: + chdir: "{{ etcd_ca_dir }}/fragments" + creates: "{{ etcd_ca_dir }}/fragments/openssl.cnf" + +- template: + dest: "{{ etcd_ca_dir }}/fragments/openssl_append.cnf" + src: openssl_append.j2 + +- assemble: + src: "{{ etcd_ca_dir }}/fragments" + dest: "{{ etcd_ca_dir }}/openssl.cnf" + +- command: touch index.txt + args: + chdir: "{{ etcd_ca_dir }}" + creates: "{{ etcd_ca_dir }}/index.txt" + +- copy: + dest: "{{ etcd_ca_dir }}/serial" + content: "01" + force: no + +- command: > + openssl req -config openssl.cnf -newkey rsa:4096 + -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self + -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }} + args: + chdir: "{{ etcd_ca_dir }}" + creates: "{{ etcd_ca_dir }}/ca.crt" + environment: + SAN: '' diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd_ca/templates/openssl_append.j2 new file mode 100644 index 000000000..de2adaead --- /dev/null +++ b/roles/etcd_ca/templates/openssl_append.j2 @@ -0,0 +1,51 @@ + +[ etcd_v3_req ] +basicConstraints = critical,CA:FALSE +keyUsage = digitalSignature,keyEncipherment +subjectAltName = ${ENV::SAN} + +[ etcd_ca ] +dir = {{ etcd_ca_dir }} +crl_dir = $dir/crl +database = $dir/index.txt +new_certs_dir = $dir/certs +certificate = $dir/ca.crt +serial = $dir/serial +private_key = $dir/ca.key +crl_number = $dir/crlnumber +x509_extensions = etcd_v3_ca_client +default_days = 365 +default_md = sha256 +preserve = no +name_opt = ca_default +cert_opt = ca_default +policy = policy_anything +unique_subject = no +copy_extensions = copy + +[ etcd_v3_ca_self ] +authorityKeyIdentifier = keyid,issuer +basicConstraints = critical,CA:TRUE,pathlen:0 +keyUsage = critical,digitalSignature,keyEncipherment,keyCertSign +subjectKeyIdentifier = hash + +[ etcd_v3_ca_peer ] +authorityKeyIdentifier = keyid,issuer:always +basicConstraints = critical,CA:FALSE +extendedKeyUsage = clientAuth,serverAuth +keyUsage = digitalSignature,keyEncipherment +subjectKeyIdentifier = hash + +[ etcd_v3_ca_server ] +authorityKeyIdentifier = keyid,issuer:always +basicConstraints = critical,CA:FALSE +extendedKeyUsage = serverAuth +keyUsage = digitalSignature,keyEncipherment +subjectKeyIdentifier = hash + +[ etcd_v3_ca_client ] +authorityKeyIdentifier = keyid,issuer:always 
+basicConstraints = critical,CA:FALSE +extendedKeyUsage = clientAuth +keyUsage = digitalSignature,keyEncipherment +subjectKeyIdentifier = hash diff --git a/roles/etcd_ca/vars/main.yml b/roles/etcd_ca/vars/main.yml new file mode 100644 index 000000000..901e95027 --- /dev/null +++ b/roles/etcd_ca/vars/main.yml @@ -0,0 +1,3 @@ +--- +etcd_conf_dir: /etc/etcd +etcd_ca_dir: /etc/etcd/ca diff --git a/roles/etcd_certificates/README.md b/roles/etcd_certificates/README.md new file mode 100644 index 000000000..95f8f8aab --- /dev/null +++ b/roles/etcd_certificates/README.md @@ -0,0 +1,34 @@ +OpenShift etcd certificates +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information +------------------ + +Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_certificates/meta/main.yml b/roles/etcd_certificates/meta/main.yml new file mode 100644 index 000000000..41370fab4 --- /dev/null +++ b/roles/etcd_certificates/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: etcd_ca } diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml new file mode 100644 index 000000000..28f33f442 --- /dev/null +++ b/roles/etcd_certificates/tasks/client.yml @@ -0,0 +1,42 @@ +--- +- name: Ensure generated_certs directory present + file: + path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + state: directory + mode: 0700 + with_items: etcd_needing_client_certs + +- name: Create the client csr + command: > + openssl req -new -keyout {{ item.etcd_cert_prefix }}client.key + -config {{ etcd_openssl_conf }} + -out {{ item.etcd_cert_prefix }}client.csr + -reqexts {{ etcd_req_ext }} -batch -nodes + -subj /CN={{ item.openshift.common.hostname }} + args: + chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' + ~ item.etcd_cert_prefix ~ 'client.csr' }}" + environment: + SAN: "IP:{{ item.openshift.common.ip }}" + with_items: etcd_needing_client_certs + +- name: Sign and create the client crt + command: > + openssl ca -name {{ etcd_ca_name }} -config {{ etcd_openssl_conf }} + -out {{ item.etcd_cert_prefix }}client.crt + -in {{ item.etcd_cert_prefix }}client.csr + -batch + args: + chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' + ~ item.etcd_cert_prefix ~ 'client.crt' }}" + environment: + SAN: '' + with_items: etcd_needing_client_certs + +- file: + src: "{{ etcd_ca_cert }}" + dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt" + state: hard + with_items: etcd_needing_client_certs diff --git a/roles/etcd_certificates/tasks/main.yml b/roles/etcd_certificates/tasks/main.yml new file mode 100644 index 000000000..da875e8ea --- /dev/null +++ b/roles/etcd_certificates/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- include: client.yml + when: etcd_needing_client_certs is defined and etcd_needing_client_certs + +- include: server.yml + when: etcd_needing_server_certs is defined and etcd_needing_server_certs + + + diff --git 
a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml new file mode 100644 index 000000000..727b7fa2c --- /dev/null +++ b/roles/etcd_certificates/tasks/server.yml @@ -0,0 +1,73 @@ +--- +- name: Ensure generated_certs directory present + file: + path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + state: directory + mode: 0700 + with_items: etcd_needing_server_certs + +- name: Create the server csr + command: > + openssl req -new -keyout {{ item.etcd_cert_prefix }}server.key + -config {{ etcd_openssl_conf }} + -out {{ item.etcd_cert_prefix }}server.csr + -reqexts {{ etcd_req_ext }} -batch -nodes + -subj /CN={{ item.openshift.common.hostname }} + args: + chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' + ~ item.etcd_cert_prefix ~ 'server.csr' }}" + environment: + SAN: "IP:{{ item.openshift.common.ip }}" + with_items: etcd_needing_server_certs + +- name: Sign and create the server crt + command: > + openssl ca -name {{ etcd_ca_name }} -config {{ etcd_openssl_conf }} + -out {{ item.etcd_cert_prefix }}server.crt + -in {{ item.etcd_cert_prefix }}server.csr + -extensions {{ etcd_ca_exts_server }} -batch + args: + chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' + ~ item.etcd_cert_prefix ~ 'server.crt' }}" + environment: + SAN: '' + with_items: etcd_needing_server_certs + +- name: Create the peer csr + command: > + openssl req -new -keyout {{ item.etcd_cert_prefix }}peer.key + -config {{ etcd_openssl_conf }} + -out {{ item.etcd_cert_prefix }}peer.csr + -reqexts {{ etcd_req_ext }} -batch -nodes + -subj /CN={{ item.openshift.common.hostname }} + args: + chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' + ~ item.etcd_cert_prefix ~ 'peer.csr' }}" + environment: + SAN: "IP:{{ item.openshift.common.ip }}" + with_items: etcd_needing_server_certs + +- name: Sign and create the peer crt + command: > + openssl ca -name {{ etcd_ca_name }} -config {{ etcd_openssl_conf }} + -out {{ item.etcd_cert_prefix }}peer.crt + -in {{ item.etcd_cert_prefix }}peer.csr + -extensions {{ etcd_ca_exts_peer }} -batch + args: + chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}" + creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' + ~ item.etcd_cert_prefix ~ 'peer.crt' }}" + environment: + SAN: '' + with_items: etcd_needing_server_certs + +- file: + src: "{{ etcd_ca_cert }}" + dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt" + state: hard + with_items: etcd_needing_server_certs + + diff --git a/roles/etcd_certificates/vars/main.yml b/roles/etcd_certificates/vars/main.yml new file mode 100644 index 000000000..0eaeeb82b --- /dev/null +++ b/roles/etcd_certificates/vars/main.yml @@ -0,0 +1,11 @@ +--- +etcd_conf_dir: /etc/etcd +etcd_ca_dir: /etc/etcd/ca +etcd_generated_certs_dir: /etc/etcd/generated_certs +etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" +etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" +etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" +etcd_ca_name: etcd_ca +etcd_req_ext: etcd_v3_req +etcd_ca_exts_peer: etcd_v3_ca_peer +etcd_ca_exts_server: etcd_v3_ca_server diff --git a/roles/openshift_etcd_certs/README.md b/roles/openshift_etcd_certs/README.md deleted file mode 100644 index efac6d9fe..000000000 --- 
a/roles/openshift_etcd_certs/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift etcd certs -======================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/openshift_etcd_certs/meta/main.yml b/roles/openshift_etcd_certs/meta/main.yml deleted file mode 100644 index 4847ba94b..000000000 --- a/roles/openshift_etcd_certs/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Scott Dodson - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.8 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- { role: openshift_facts } diff --git a/roles/openshift_etcd_certs/tasks/main.yml b/roles/openshift_etcd_certs/tasks/main.yml deleted file mode 100644 index 04b411117..000000000 --- a/roles/openshift_etcd_certs/tasks/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Create openshift_generated_configs_dir if it doesn't exist - file: - path: "{{ openshift_generated_configs_dir }}" - state: directory - -- name: Create openshift_generated_configs_dir for each etcd host - file: - path: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname}}" - state: directory - with_items: etcd_hosts_needing_certs - -- name: Generate the etcd client side certs - delegate_to: "{{ openshift_first_master }}" - command: > - {{ openshift.common.admin_binary }} create-server-cert - --cert=client.crt --key=client.key --overwrite=true - --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname, item.openshift.common.ip]|unique|join(",") }} - --signer-cert={{ openshift_master_ca_cert }} - --signer-key={{ openshift_master_ca_key }} - --signer-serial={{ openshift_master_ca_serial }} - args: - chdir: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}" - creates: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}/client.crt" - with_items: etcd_hosts_needing_certs - -- name: Copy CA cert - delegate_to: "{{ openshift_first_master }}" - command: "cp {{ openshift_master_ca_cert }} ." 
- args: - chdir: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}" - creates: "{{ openshift_generated_configs_dir }}/etcd-{{ item.openshift.common.hostname }}/ca.crt" - with_items: etcd_hosts_needing_certs diff --git a/roles/openshift_etcd_certs/vars/main.yml b/roles/openshift_etcd_certs/vars/main.yml deleted file mode 100644 index 3801b8427..000000000 --- a/roles/openshift_etcd_certs/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -openshift_node_config_dir: /etc/openshift/node -openshift_master_config_dir: /etc/openshift/master -openshift_generated_configs_dir: /etc/openshift/generated-configs -openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" -openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" -openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" -openshift_kube_api_version: v1beta3 diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index e9a2ceffb..aff822a23 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -374,7 +374,6 @@ def set_url_facts_if_unset(facts): if 'etcd_urls' not in facts['master']: etcd_urls = [] if etcd_hosts != '': - etcd_port = 2379 facts['master']['etcd_port'] = etcd_port facts['master']['embedded_etcd'] = False for host in etcd_hosts: @@ -718,11 +717,7 @@ class OpenShiftFacts(object): defaults['master'] = master if 'node' in roles: - node = dict(pod_cidr='', labels={}, annotations={}, portal_net='172.30.0.0/16') - node['resources_cpu'] = self.system_facts['processor_cores'] - node['resources_memory'] = int( - int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75 - ) + node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16') defaults['node'] = node return defaults diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index f6bd2bf2e..95da2d6f4 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -12,11 +12,6 @@ yum: pkg=openshift-master state=present register: install_result -# TODO: Is this necessary or was this a workaround for an old bug in packaging? 
-- name: Reload systemd units - command: systemctl daemon-reload - when: install_result | changed - - name: Set master OpenShift facts openshift_facts: role: master diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 3b8b18c39..bc766ec9b 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -18,19 +18,19 @@ corsAllowedOrigins: {% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %} - {{ origin }} {% endfor %} -{% if openshift.master.embedded_dns %} +{% if openshift.master.embedded_dns | bool %} dnsConfig: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }} {% endif %} etcdClientInfo: - ca: ca.crt + ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: {% for etcd_url in openshift.master.etcd_urls %} - {{ etcd_url }} {% endfor %} -{% if openshift.master.embedded_etcd %} +{% if openshift.master.embedded_etcd | bool %} etcdConfig: address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }} peerAddress: {{ openshift.common.hostname }}:7001 @@ -61,7 +61,7 @@ kubeletClientInfo: certFile: master.kubelet-client.crt keyFile: master.kubelet-client.key port: 10250 -{% if openshift.master.embedded_kube %} +{% if openshift.master.embedded_kube | bool %} kubernetesMasterConfig: apiLevels: - v1beta3 diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 770b55351..53b325e4d 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -10,11 +10,6 @@ register: sdn_install_result when: openshift.common.use_openshift_sdn -- name: Reload systemd units - command: systemctl daemon-reload - when: (node_install_result | changed or (openshift.common.use_openshift_sdn - and sdn_install_result | changed)) - - name: Set node OpenShift facts openshift_facts: role: "{{ item.role }}" @@ -27,9 +22,6 @@ deployment_type: "{{ openshift_deployment_type }}" - role: node local_facts: - resources_cpu: "{{ openshift_node_resources_cpu | default(none) }}" - resources_memory: "{{ openshift_node_resources_memory | default(none) }}" - pod_cidr: "{{ openshift_node_pod_cidr | default(none) }}" labels: "{{ openshift_node_labels | default(none) }}" annotations: "{{ openshift_node_annotations | default(none) }}" registry_url: "{{ oreg_url | default(none) }}" diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml index 3801b8427..a018bb0f9 100644 --- a/roles/openshift_node_certificates/vars/main.yml +++ b/roles/openshift_node_certificates/vars/main.yml @@ -5,4 +5,3 @@ openshift_generated_configs_dir: /etc/openshift/generated-configs openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" -openshift_kube_api_version: v1beta3 -- cgit v1.2.3 From f752eaccbb1a5f0e2c1d36502f755d022a21d073 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 10 Jul 2015 15:04:26 -0400 Subject: Playbook updates for clustered etcd - Add support to bin/cluster for specifying etcd hosts - defaults to 0, if no etcd hosts are selected, then configures embedded etcd - Updates for the byo inventory file 
for etcd and master as node by default - Consolidation of cluster logic more centrally into common playbook - Added etcd config support to playbooks - Restructured byo playbooks to leverage the common openshift-cluster playbook - Added support to common master playbook to generate and apply external etcd client certs from the etcd ca - start of refactor for better handling of master certs in a multi-master environment. - added the openshift_master_ca and openshift_master_certificates roles to manage master certs instead of generating them in the openshift_master role - added etcd host groups to the cluster update playbooks - aded better handling of host groups when they are either not present or are empty. - Update AWS readme --- README_AWS.md | 20 ++- bin/cluster | 3 + filter_plugins/oo_filters.py | 6 +- inventory/byo/hosts | 6 +- playbooks/aws/openshift-cluster/config.yml | 33 +--- playbooks/aws/openshift-cluster/launch.yml | 14 +- .../openshift-cluster/tasks/launch_instances.yml | 9 + playbooks/aws/openshift-cluster/update.yml | 4 +- playbooks/byo/config.yml | 12 +- playbooks/byo/openshift-cluster/config.yml | 9 + playbooks/byo/openshift-cluster/filter_plugins | 1 + playbooks/byo/openshift-cluster/lookup_plugins | 1 + playbooks/byo/openshift-cluster/roles | 1 + playbooks/byo/openshift-master/config.yml | 15 -- playbooks/byo/openshift-master/filter_plugins | 1 - playbooks/byo/openshift-master/lookup_plugins | 1 - playbooks/byo/openshift-master/roles | 1 - playbooks/byo/openshift-node/config.yml | 23 --- playbooks/byo/openshift-node/filter_plugins | 1 - playbooks/byo/openshift-node/lookup_plugins | 1 - playbooks/byo/openshift-node/roles | 1 - playbooks/common/openshift-cluster/config.yml | 61 +++++++ .../set_master_launch_facts_tasks.yml | 6 +- .../set_node_launch_facts_tasks.yml | 6 +- playbooks/common/openshift-master/config.yml | 199 ++++++++++++++++++++- playbooks/common/openshift-node/config.yml | 39 ++-- playbooks/gce/openshift-cluster/config.yml | 34 ++-- playbooks/gce/openshift-cluster/update.yml | 4 +- playbooks/libvirt/openshift-cluster/config.yml | 33 +--- playbooks/libvirt/openshift-cluster/update.yml | 4 +- playbooks/openstack/openshift-cluster/config.yml | 33 +--- playbooks/openstack/openshift-cluster/update.yml | 4 +- roles/openshift_master/tasks/main.yml | 12 +- roles/openshift_master_ca/README.md | 34 ++++ roles/openshift_master_ca/meta/main.yml | 16 ++ roles/openshift_master_ca/tasks/main.yml | 22 +++ roles/openshift_master_ca/vars/main.yml | 5 + roles/openshift_master_certificates/README.md | 34 ++++ roles/openshift_master_certificates/meta/main.yml | 16 ++ roles/openshift_master_certificates/tasks/main.yml | 24 +++ roles/openshift_master_certificates/vars/main.yml | 6 + roles/openshift_node/tasks/main.yml | 6 + roles/openshift_node/templates/node.yaml.v1.j2 | 4 +- roles/openshift_node_certificates/tasks/main.yml | 1 + 44 files changed, 558 insertions(+), 208 deletions(-) create mode 100644 playbooks/byo/openshift-cluster/config.yml create mode 120000 playbooks/byo/openshift-cluster/filter_plugins create mode 120000 playbooks/byo/openshift-cluster/lookup_plugins create mode 120000 playbooks/byo/openshift-cluster/roles delete mode 100644 playbooks/byo/openshift-master/config.yml delete mode 120000 playbooks/byo/openshift-master/filter_plugins delete mode 120000 playbooks/byo/openshift-master/lookup_plugins delete mode 120000 playbooks/byo/openshift-master/roles delete mode 100644 playbooks/byo/openshift-node/config.yml delete mode 120000 
playbooks/byo/openshift-node/filter_plugins delete mode 120000 playbooks/byo/openshift-node/lookup_plugins delete mode 120000 playbooks/byo/openshift-node/roles create mode 100644 roles/openshift_master_ca/README.md create mode 100644 roles/openshift_master_ca/meta/main.yml create mode 100644 roles/openshift_master_ca/tasks/main.yml create mode 100644 roles/openshift_master_ca/vars/main.yml create mode 100644 roles/openshift_master_certificates/README.md create mode 100644 roles/openshift_master_certificates/meta/main.yml create mode 100644 roles/openshift_master_certificates/tasks/main.yml create mode 100644 roles/openshift_master_certificates/vars/main.yml (limited to 'playbooks') diff --git a/README_AWS.md b/README_AWS.md index 5db36b5cb..0e3128a92 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -20,10 +20,11 @@ Create a credentials file ``` Note: You must source this file before running any Ansible commands. +Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials, see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format. (Optional) Setup your $HOME/.ssh/config file ------------------------------------------- -In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config' +In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config` to setup a private key file to allow ansible to connect to the created hosts. To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS. @@ -62,10 +63,16 @@ Node specific defaults: If needed, these values can be changed by setting environment variables on your system. - export ec2_instance_type='m3.large' -- export ec2_ami='ami-307b3658' +- export ec2_image='ami-307b3658' - export ec2_region='us-east-1' - export ec2_keypair='libra' - export ec2_security_groups="['public']" +- export ec2_vpc_subnet='my_vpc_subnet' +- export ec2_assign_public_ip='true' +- export os_etcd_root_vol_size='20' +- export os_etcd_root_vol_type='standard' +- export os_etcd_vol_size='20' +- export os_etcd_vol_type='standard' - export os_master_root_vol_size='20' - export os_master_root_vol_type='standard' - export os_node_root_vol_size='15' @@ -114,3 +121,12 @@ Terminating a cluster ``` bin/cluster terminate aws ``` + +Specifying a deployment type +--------------------------- +The --deployment-type flag can be passed to bin/cluster to specify the deployment type +1. To launch an online cluster (requires access to private repositories and amis): +``` + bin/cluster create aws --deployment-type=online +``` +Note: If no deployment type is specified, then the default is origin. 
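To illustrate how these pieces fit together, a hypothetical launch of an AWS cluster with a dedicated etcd tier might look like the following sketch; the volume sizes are arbitrary overrides and the flags are the ones added in this change (with -e 0, the default, no etcd hosts are created and the masters use embedded etcd):

    # Optional: size the dedicated etcd volumes before launching
    export os_etcd_root_vol_size='25'
    export os_etcd_vol_size='32'
    # Request three external etcd hosts via the new -e/--etcd flag
    bin/cluster create aws -e 3 --deployment-type=origin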
diff --git a/bin/cluster b/bin/cluster index 0d760c342..746c0349a 100755 --- a/bin/cluster +++ b/bin/cluster @@ -51,6 +51,7 @@ class Cluster(object): env['num_masters'] = args.masters env['num_nodes'] = args.nodes + env['num_etcd'] = args.etcd return self.action(args, inventory, env, playbook) @@ -261,6 +262,8 @@ if __name__ == '__main__': help='number of masters to create in cluster') create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster') + create_parser.add_argument('-e', '--etcd', default=0, type=int, + help='number of external etcd hosts to create in cluster') create_parser.set_defaults(func=cluster.create) config_parser = action_parser.add_parser('config', diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 0f3f4fa9e..30b8819b8 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -175,9 +175,9 @@ class FilterModule(object): ''' if not issubclass(type(data), dict): raise errors.AnsibleFilterError("|failed expects first param is a dict") - if host_type not in ['master', 'node']: - raise errors.AnsibleFilterError("|failed expects either master or node" - " host type") + if host_type not in ['master', 'node', 'etcd']: + raise errors.AnsibleFilterError("|failed expects etcd, master or node" + " as the host type") root_vol = data[host_type]['root'] root_vol['device_name'] = '/dev/sda1' diff --git a/inventory/byo/hosts b/inventory/byo/hosts index ab54ce2db..a9add6a60 100644 --- a/inventory/byo/hosts +++ b/inventory/byo/hosts @@ -4,6 +4,7 @@ [OSEv3:children] masters nodes +etcd # Set variables common for all OSEv3 hosts [OSEv3:vars] @@ -33,7 +34,10 @@ openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': [masters] ose3-master-ansible.test.example.com +[etcd] +#ose3-master-ansible.test.example.com + # host group for nodes [nodes] -#ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}" +ose3-master-ansible.test.example.com openshift_scheduleable=False ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 7188312ed..6ee539c7e 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -1,37 +1,22 @@ --- -- name: Populate oo_masters_to_config host group - hosts: localhost +- hosts: localhost gather_facts: no vars_files: - vars.yml tasks: - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]) - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]) - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" - groups: oo_first_master - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' 
in groups" + - set_fact: + g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - include: ../../common/openshift-cluster/config.yml vars: + g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" + g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" + g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" + g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" + g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" - openshift_first_master: "{{ groups.oo_first_master.0 }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 33e1ec25d..5db87fa90 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -11,6 +11,13 @@ msg: Deployment type not supported for aws provider yet when: deployment_type == 'enterprise' + - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ etcd_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml - include: tasks/launch_instances.yml vars: @@ -25,9 +32,10 @@ cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" - - set_fact: - a_master: "{{ master_names[0] }}" - - add_host: name={{ a_master }} groups=service_master + - add_host: + name: "{{ master_names.0 }}" + groups: service_master + when: master_names is defined and master_names.0 is defined - include: update.yml diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index d643b647d..25a87aaf6 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -53,6 +53,15 @@ latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}" user_data: "{{ lookup('template', '../templates/user_data.j2') }}" volume_defs: + etcd: + root: + volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}" + device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}" + iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}" + etcd: + volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}" + device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}" + iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}" master: root: volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}" diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index 5e7ab4e58..e006aa74a 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -11,7 +11,9 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([]) + with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | 
default([])) + | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])) + | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([])) - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml index 092eb9978..7d03914a2 100644 --- a/playbooks/byo/config.yml +++ b/playbooks/byo/config.yml @@ -1,12 +1,2 @@ --- -- name: Run the openshift-master config playbook - include: openshift-master/config.yml - when: groups.masters is defined and groups.masters - -- name: Run the openshift-etcd playbook - include: openshift-etcd/config.yml - when: groups.etcd is defined and groups.etcd - -- name: Run the openshift-node config playbook - include: openshift-node/config.yml - when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters +- include: openshift-cluster/config.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml new file mode 100644 index 000000000..2ee1d50a7 --- /dev/null +++ b/playbooks/byo/openshift-cluster/config.yml @@ -0,0 +1,9 @@ +--- +- include: ../../common/openshift-cluster/config.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/filter_plugins b/playbooks/byo/openshift-cluster/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-cluster/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/lookup_plugins b/playbooks/byo/openshift-cluster/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-cluster/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/roles b/playbooks/byo/openshift-cluster/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml deleted file mode 100644 index f61d277c6..000000000 --- a/playbooks/byo/openshift-master/config.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Populate oo_masters_to_config host group - hosts: localhost - gather_facts: no - tasks: - - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - with_items: groups['masters'] - -- include: ../../common/openshift-master/config.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/byo/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/byo/openshift-master/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins \ No newline at end of file diff --git 
a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/byo/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml deleted file mode 100644 index f50903061..000000000 --- a/playbooks/byo/openshift-node/config.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate oo_nodes_to_config and oo_first_master host groups - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - with_items: groups.nodes - - name: Evaluate oo_first_master - add_host: - name: "{{ item }}" - groups: oo_first_master - with_items: groups.masters.0 - - -- include: ../../common/openshift-node/config.yml - vars: - openshift_first_master: "{{ groups.masters.0 }}" - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/byo/openshift-node/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-node/lookup_plugins b/playbooks/byo/openshift-node/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/byo/openshift-node/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-node/roles b/playbooks/byo/openshift-node/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/byo/openshift-node/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 14ffa928f..0779cfe47 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,4 +1,65 @@ --- +- name: Populate config host groups + hosts: localhost + gather_facts: no + tasks: + - fail: + msg: This playbook requires g_etcd_group to be set + when: g_etcd_group is not defined + + - fail: + msg: This playbook requires g_masters_group to be set + when: g_masters_group is not defined + + - fail: + msg: This playbook requires g_nodes_group to be set + when: g_nodes_group is not defined + + - name: Evaluate oo_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_etcd_group] | default([]) + + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_nodes_group] | default([]) + + - name: Evaluate oo_first_etcd + add_host: + name: "{{ groups[g_etcd_group][0] }}" + groups: oo_first_etcd + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit)
}}" + when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 + + - name: Evaluate oo_first_master + add_host: + name: "{{ groups[g_masters_group][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 + +- include: ../openshift-etcd/config.yml + - include: ../openshift-master/config.yml - include: ../openshift-node/config.yml + vars: + osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml index 118727273..36d7b7870 100644 --- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml @@ -5,7 +5,9 @@ set_fact: scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: master_names_output - with_sequence: start=1 end={{ num_masters }} + with_sequence: count={{ num_masters }} - set_fact: - master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" + master_names: "{{ master_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml index 162315d46..96e1a9a63 100644 --- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml @@ -5,7 +5,9 @@ set_fact: scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: node_names_output - with_sequence: start=1 end={{ num_nodes }} + with_sequence: count={{ num_nodes }} - set_fact: - node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" + node_names: "{{ node_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 29c4d9c5c..3956128e1 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,19 +1,214 @@ --- +- name: Set master facts and determine if external etcd certs need to be generated + hosts: oo_masters_to_config + pre_tasks: + - set_fact: + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] + | default([])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + deployment_type: "{{ openshift_deployment_type }}" + - role: master + local_facts: + api_port: "{{ openshift_master_api_port | default(None) }}" + api_url: "{{ openshift_master_api_url | default(None) }}" + 
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" + public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + console_path: "{{ openshift_master_console_path | default(None) }}" + console_port: "{{ openshift_master_console_port | default(None) }}" + console_url: "{{ openshift_master_console_url | default(None) }}" + console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" + public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + - name: Check status of external etcd certificates + stat: + path: "/etc/openshift/master/{{ item }}" + with_items: + - master.etcd-client.crt + - master.etcd-ca.crt + register: g_external_etcd_cert_stat_result + - set_fact: + etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results + | map(attribute='stat.exists') + | list | intersect([false])}}" + etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }} + etcd_cert_config_dir: /etc/openshift/master + etcd_cert_prefix: master.etcd- + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + sudo: false + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_master_mktemp + changed_when: False + +- name: Configure etcd certificates + hosts: oo_first_etcd + vars: + etcd_generated_certs_dir: /etc/etcd/generated_certs + etcd_needing_client_certs: "{{ hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_filter_list(filter_attr='etcd_client_certs_missing') }}" + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + roles: + - etcd_certificates + post_tasks: + - name: Create a tarball of the etcd certs + command: > + tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz + -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args: + creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" + with_items: etcd_needing_client_certs + - name: Retrieve the etcd cert tarballs + fetch: + src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: etcd_needing_client_certs + +- name: Copy the external etcd certs to the masters + hosts: oo_masters_to_config + vars: + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + tasks: + - name: Ensure certificate directory exists + file: + path: /etc/openshift/master + state: directory + when: etcd_client_certs_missing is defined and etcd_client_certs_missing + - name: Unarchive the tarball on the master + unarchive: + src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz" + dest: "{{ etcd_cert_config_dir }}" + when: etcd_client_certs_missing is defined and etcd_client_certs_missing + - file: + path: "{{ etcd_cert_config_dir }}/{{ item }}" + owner: root + group: root + mode: 0600 + with_items: + - master.etcd-client.crt + - master.etcd-client.key + - master.etcd-ca.crt + when: etcd_client_certs_missing is defined and etcd_client_certs_missing + +- name: Determine if master certificates need to be generated + hosts: oo_masters_to_config + tasks: + - set_fact: + openshift_master_certs_no_etcd: + - admin.crt + - master.kubelet-client.crt + - master.server.crt + - openshift-master.crt + - openshift-registry.crt + - openshift-router.crt + - etcd.server.crt + openshift_master_certs_etcd: + - master.etcd-client.crt + - set_fact: + openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}" + + - name: Check status of master certificates + stat: + path: "/etc/openshift/master/{{ item }}" + with_items: openshift_master_certs + register: g_master_cert_stat_result + - set_fact: + master_certs_missing: "{{ g_master_cert_stat_result.results + | map(attribute='stat.exists') + | list | intersect([false])}}" + master_cert_subdir: master-{{ openshift.common.hostname }} + master_cert_config_dir: /etc/openshift/master + +- name: Configure master certificates + hosts: oo_first_master + vars: + master_generated_certs_dir: /etc/openshift/generated-configs + masters_needing_certs: "{{ hostvars + | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master'])) + | oo_filter_list(filter_attr='master_certs_missing') }}" + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + roles: + - openshift_master_certificates + post_tasks: + - name: Create a tarball of the master certs + command: > + tar -czvf {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz + -C {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }} . 
+ args: + creates: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz" + with_items: masters_needing_certs + - name: Retrieve the master cert tarball from the master + fetch: + src: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: masters_needing_certs + - name: Configure master instances hosts: oo_masters_to_config + vars: + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + pre_tasks: + - name: Ensure certificate directory exists + file: + path: /etc/openshift/master + state: directory + when: master_certs_missing and 'oo_first_master' not in group_names + - name: Unarchive the tarball on the master + unarchive: + src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz" + dest: "{{ master_cert_config_dir }}" + when: master_certs_missing and 'oo_first_master' not in group_names roles: - openshift_master - - openshift_examples - role: fluentd_master when: openshift.common.use_fluentd | bool - tasks: + post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} changed_when: False +- name: Deploy OpenShift examples + hosts: oo_first_master + roles: + - openshift_examples + # Additional instance config for online deployments - name: Additional instance config hosts: oo_masters_deployment_type_online roles: - pods - os_env_extras + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + sudo: false + gather_facts: no + tasks: + - file: name={{ g_master_mktemp.stdout }} state=absent + changed_when: False diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 1cf5616ce..bd35008b8 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -18,21 +18,18 @@ deployment_type: "{{ openshift_deployment_type }}" - role: node local_facts: - resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshift_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}" labels: "{{ openshift_node_labels | default(None) }}" annotations: "{{ openshift_node_annotations | default(None) }}" - name: Check status of node certificates stat: - path: "{{ item }}" + path: "/etc/openshift/node/{{ item }}" with_items: - - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.crt" - - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.key" - - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.kubeconfig" - - "/etc/openshift/node/ca.crt" - - "/etc/openshift/node/server.key" - - "/etc/openshift/node/server.crt" + - "system:node:{{ openshift.common.hostname }}.crt" + - "system:node:{{ openshift.common.hostname }}.key" + - "system:node:{{ openshift.common.hostname }}.kubeconfig" + - ca.crt + - server.key + - server.crt register: stat_result - set_fact: certs_missing: "{{ stat_result.results | map(attribute='stat.exists') @@ -56,10 +53,9 @@ hosts: oo_first_master vars: nodes_needing_certs: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) + | oo_select_keys(groups['oo_nodes_to_config'] + | default([])) | oo_filter_list(filter_attr='certs_missing') }}" - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" roles: - openshift_node_certificates @@ -86,7 +82,7 @@ hosts: 
oo_nodes_to_config vars: sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" - openshift_node_master_api_url: "{{ hostvars[openshift_first_master].openshift.master.api_url }}" + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -110,15 +106,6 @@ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} changed_when: False -- name: Delete the temporary directory on the master - hosts: oo_first_master - gather_facts: no - vars: - sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" - tasks: - - file: name={{ sync_tmpdir }} state=absent - changed_when: False - - name: Delete temporary directory on localhost hosts: localhost connection: local @@ -143,12 +130,14 @@ | oo_select_keys(groups['oo_nodes_to_config']) | oo_collect('openshift.common.hostname') }}" openshift_unscheduleable_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) + | oo_select_keys(groups['oo_nodes_to_config'] + | default([])) | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" pre_tasks: - set_fact: openshift_scheduleable_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) + | oo_select_keys(groups['oo_nodes_to_config'] + | default([])) | oo_collect('openshift.common.hostname') | difference(openshift_unscheduleable_nodes) }}" roles: diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 8c320dbd2..219ebe6a0 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -1,38 +1,24 @@ --- # TODO: fix firewall related bug with GCE and origin, since GCE is overriding # /etc/sysconfig/iptables -- name: Populate oo_masters_to_config host group - hosts: localhost + +- hosts: localhost gather_facts: no vars_files: - vars.yml tasks: - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]) - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" - groups: oo_first_master - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + - set_fact: + g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - include: ../../common/openshift-cluster/config.yml vars: + g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" + g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" + g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" + g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" + g_sudo: "{{ 
hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" - openshift_first_master: "{{ groups.oo_first_master.0 }}" openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 9ebf39a13..8096aa654 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -11,7 +11,9 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([]) + with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])) + | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])) + | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([])) - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 75e2005a2..98fe11251 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -3,37 +3,22 @@ # is localhost, so no hostname value (or public_hostname) value is getting # assigned -- name: Populate oo_masters_to_config host group - hosts: localhost +- hosts: localhost gather_facts: no vars_files: - vars.yml tasks: - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_masters_to_config - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_nodes_to_config - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]) - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_first_master - when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + - set_fact: + g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - include: ../../common/openshift-cluster/config.yml vars: + g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" + g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" + g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" + g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" + g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" - openshift_first_master: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml index 57e36db9e..d09832c16 100644 --- 
a/playbooks/libvirt/openshift-cluster/update.yml +++ b/playbooks/libvirt/openshift-cluster/update.yml @@ -11,7 +11,9 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([]) + with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])) + | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])) + | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([])) - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index abadaf5ca..3c9a231e3 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -1,35 +1,20 @@ -- name: Populate oo_masters_to_config host group - hosts: localhost +- hosts: localhost gather_facts: no vars_files: - vars.yml tasks: - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_masters_to_config - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]) - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_nodes_to_config - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]) - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_first_master - when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups" + - set_fact: + g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - include: ../../common/openshift-cluster/config.yml vars: + g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" + g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" + g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" + g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" + g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" - openshift_first_master: "{{ groups.oo_first_master.0 }}" openshift_hostname: "{{ ansible_default_ipv4.address }}" diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index 5e7ab4e58..e006aa74a 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml @@ -11,7 +11,9 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([]) 
+ with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])) + | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])) + | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([])) - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 95da2d6f4..b4d0ec0ad 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -26,7 +26,7 @@ console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" - etcd_hosts: "{{ groups['etcd'] | default(None)}}" + etcd_hosts: "{{ openshift_master_etcd_hosts | default(None)}}" etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}" @@ -61,16 +61,6 @@ path: "{{ openshift_master_config_dir }}" state: directory -- name: Create the master certificates if they do not already exist - command: > - {{ openshift.common.admin_binary }} create-master-certs - --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }} - --master={{ openshift.master.api_url }} - --public-master={{ openshift.master.public_api_url }} - --cert-dir={{ openshift_master_config_dir }} --overwrite=false - args: - creates: "{{ openshift_master_config_dir }}/master.server.key" - - name: Create the policy file if it does not already exist command: > {{ openshift.common.admin_binary }} create-bootstrap-policy-file diff --git a/roles/openshift_master_ca/README.md b/roles/openshift_master_ca/README.md new file mode 100644 index 000000000..5b2d3601b --- /dev/null +++ b/roles/openshift_master_ca/README.md @@ -0,0 +1,34 @@ +OpenShift Master CA +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_master_ca/meta/main.yml b/roles/openshift_master_ca/meta/main.yml new file mode 100644 index 000000000..f3236e850 --- /dev/null +++ b/roles/openshift_master_ca/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml new file mode 100644 index 000000000..8163ecd7f --- /dev/null +++ b/roles/openshift_master_ca/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: Install the OpenShift package for admin tooling + yum: pkg=openshift state=present + register: install_result + +- name: Reload generated facts + openshift_facts: + +- name: Create openshift_master_config_dir if it doesn't exist + file: + path: "{{ openshift_master_config_dir }}" + state: directory + +- name: Create the master certificates if they do not already exist + command: > + {{ openshift.common.admin_binary }} create-master-certs + --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }} + --master={{ openshift.master.api_url }} + --public-master={{ openshift.master.public_api_url }} + --cert-dir={{ openshift_master_config_dir }} --overwrite=false + args: + creates: "{{ openshift_master_config_dir }}/master.server.key" diff --git a/roles/openshift_master_ca/vars/main.yml b/roles/openshift_master_ca/vars/main.yml new file mode 100644 index 000000000..2925680bb --- /dev/null +++ b/roles/openshift_master_ca/vars/main.yml @@ -0,0 +1,5 @@ +--- +openshift_master_config_dir: /etc/openshift/master +openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" +openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" +openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" diff --git a/roles/openshift_master_certificates/README.md b/roles/openshift_master_certificates/README.md new file mode 100644 index 000000000..ba3d5f28c --- /dev/null +++ b/roles/openshift_master_certificates/README.md @@ -0,0 +1,34 @@ +OpenShift Master Certificates +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml new file mode 100644 index 000000000..fd7b73b0f --- /dev/null +++ b/roles/openshift_master_certificates/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_master_ca } diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml new file mode 100644 index 000000000..b5a3f8e40 --- /dev/null +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Ensure the generated_configs directory is present + file: + path: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}" + state: directory + mode: 0700 + with_items: masters_needing_certs + +- file: + src: "{{ openshift_master_ca_cert }}" + dest: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/ca.crt" + with_items: masters_needing_certs + +- name: Create the master certificates if they do not already exist + command: > + {{ openshift.common.admin_binary }} create-master-certs + --hostnames={{ item.openshift.common.hostname }},{{ item.openshift.common.public_hostname }} + --master={{ item.openshift.master.api_url }} + --public-master={{ item.openshift.master.public_api_url }} + --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }} + --overwrite=false + args: + creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt" + with_items: masters_needing_certs diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml new file mode 100644 index 000000000..6e577b13b --- /dev/null +++ b/roles/openshift_master_certificates/vars/main.yml @@ -0,0 +1,6 @@ +--- +openshift_generated_configs_dir: /etc/openshift/generated-configs +openshift_master_config_dir: /etc/openshift/master +openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" +openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" +openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 53b325e4d..e18846db8 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,5 +1,11 @@ --- # TODO: allow for overriding default ports where possible +- fail: + msg: This role requires that osn_cluster_dns_domain is set + when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain +- fail: + msg: This role requires that osn_cluster_dns_ip is set + when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip - name: Install OpenShift Node package yum: pkg=openshift-node state=present diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index f313f6a4b..7778a2a61 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -1,7 +1,7 @@ allowDisabledDocker: false apiVersion: v1 -dnsDomain: {{ hostvars[openshift_first_master].openshift.dns.domain }} -dnsIP: {{ hostvars[openshift_first_master].openshift.dns.ip }} +dnsDomain: {{ osn_cluster_dns_domain }} +dnsIP: {{ osn_cluster_dns_ip }} dockerConfig: execHandlerName: "" imageConfig: diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 949afc5eb..64a799dfb 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -3,6 +3,7 @@ file: path: "{{ openshift_generated_configs_dir }}" state:
directory + when: nodes_needing_certs | length > 0 - name: Generate the node client config command: > -- cgit v1.2.3 From aefff9001ab43155696820f01db7cc11de5cfcea Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 10 Jul 2015 16:44:11 -0400 Subject: Add support for separate etcd volume with aws provider through bin/cluster --- filter_plugins/oo_filters.py | 7 +++++++ .../aws/openshift-cluster/templates/user_data.j2 | 20 ++++++++++++++++++++ playbooks/aws/openshift-cluster/terminate.yml | 2 +- 3 files changed, 28 insertions(+), 1 deletion(-) (limited to 'playbooks') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 30b8819b8..e8f60ca73 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -195,6 +195,13 @@ class FilterModule(object): docker_vol.pop('delete_on_termination', None) docker_vol['ephemeral'] = 'ephemeral0' return [root_vol, docker_vol] + elif host_type == 'etcd': + etcd_vol = data[host_type]['etcd'] + etcd_vol['device_name'] = '/dev/xvdb' + etcd_vol['delete_on_termination'] = True + if etcd_vol['device_type'] != 'io1': + etcd_vol.pop('iops', None) + return [root_vol, etcd_vol] return [root_vol] @staticmethod diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 index db14bacd1..82c2f4d57 100644 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -1,4 +1,24 @@ #cloud-config +{% if type =='etcd' %} +cloud_config_modules: +- disk_setup +- mounts + +mounts: +- [ xvdb, /var/lib/etcd, xfs, "defaults" ] + +disk_setup: + xvdb: + table_type: mbr + layout: True + +fs_setup: +- label: etcd_storage + filesystem: xfs + device: /dev/xvdb + partition: auto +{% endif %} + {% if type == 'node' %} mounts: - [ xvdb ] diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 361ab2d37..9c3703aba 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -43,7 +43,7 @@ # Fail if any of the instances failed to terminate with an error other # than 403 Forbidden - - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} + - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }} when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" with_items: ec2_term.results -- cgit v1.2.3 From 4b439253e7b4486947d201714d4f52a4a7e0fc01 Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Thu, 25 Jun 2015 10:08:52 +0200 Subject: Make all the OpenStack resources be managed by a Heat Stack --- README_openstack.md | 14 +- filter_plugins/oo_filters.py | 72 +++++- .../openshift-cluster/files/heat_stack.yaml | 279 +++++++++++++++++++++ .../openshift-cluster/files/heat_stack.yml | 149 ----------- .../openshift-cluster/files/heat_stack_server.yaml | 123 +++++++++ playbooks/openstack/openshift-cluster/launch.yml | 116 +++++++-- .../openshift-cluster/tasks/launch_instances.yml | 48 ---- .../openstack/openshift-cluster/terminate.yml | 45 +--- playbooks/openstack/openshift-cluster/vars.yml | 22 +- 9 files changed, 594 insertions(+), 274 deletions(-) create mode 100644 playbooks/openstack/openshift-cluster/files/heat_stack.yaml delete mode 100644 playbooks/openstack/openshift-cluster/files/heat_stack.yml create mode 100644 playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml 
delete mode 100644 playbooks/openstack/openshift-cluster/tasks/launch_instances.yml (limited to 'playbooks') diff --git a/README_openstack.md b/README_openstack.md index 57977d1f5..3076e7b08 100644 --- a/README_openstack.md +++ b/README_openstack.md @@ -28,19 +28,15 @@ The following options can be passed via the `-o` flag of the `create` command: * `image_name`: Name of the image to use to spawn VMs * `keypair` (default to `${LOGNAME}_key`): Name of the ssh key * `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key -* `master_flavor_ram` (default to `2048`): VM flavor for the master (by amount of RAM) -* `master_flavor_id`: VM flavor for the master (by ID) -* `master_flavor_include`: VM flavor for the master (by name) -* `node_flavor_ram` (default to `4096`): VM flavor for the nodes (by amount of RAM) -* `node_flavor_id`: VM flavor for the nodes (by ID) -* `node_flavor_include`: VM flavor for the nodes (by name) -* `infra_heat_stack` (default to `playbooks/openstack/openshift-cluster/files/heat_stack.yml`): filename of the HEAT template to use to create the cluster infrastructure +* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master +* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the nodes +* `infra_heat_stack` (default to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the HEAT template to use to create the cluster infrastructure -The following options are used only by `heat_stack.yml`. They are so used only if the `infra_heat_stack` option is left with its default value. +The following options are used only by `heat_stack.yaml`. They are so used only if the `infra_heat_stack` option is left with its default value. * `network_prefix` (default to `openshift-ansible-`): prefix prepended to all network objects (net, subnet, router, security groups) * `dns` (default to `8.8.8.8,8.8.4.4`): comma separated list of DNS to use -* `net_cidr` (default to `192.168..0/24`): CIDR of the network created by `heat_stack.yml` +* `net_cidr` (default to `192.168..0/24`): CIDR of the network created by `heat_stack.yaml` * `external_net` (default to `external`): Name of the external network to connect to * `floating_ip_pools` (default to `external`): comma separated list of floating IP pools * `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 4e4a7309d..cd197c0fe 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -8,6 +8,8 @@ Custom filters for use in openshift-ansible from ansible import errors from operator import itemgetter import pdb +import re +import json class FilterModule(object): @@ -232,6 +234,73 @@ class FilterModule(object): rval.append({string: value}) return rval + @staticmethod + def oo_parse_heat_stack_outputs(data): + ''' Formats the HEAT stack output into a usable form + + The goal is to transform something like this: + + +---------------+-------------------------------------------------+ + | Property | Value | + +---------------+-------------------------------------------------+ + | capabilities | [] | | + | creation_time | 2015-06-26T12:26:26Z | | + | description | OpenShift cluster | | + | … | … | + | outputs | [ | + | | { | + | | "output_value": "value_A" | + | | "description": "This is the value of Key_A" | + | | "output_key": "Key_A" | + | | }, | + | | { | + | | "output_value": [ | + | | "value_B1", | + | | "value_B2" | + | | ], | + | 
| "description": "This is the value of Key_B" | + | | "output_key": "Key_B" | + | | }, | + | | ] | + | parameters | { | + | … | … | + +---------------+-------------------------------------------------+ + + into something like this: + + { + "Key_A": "value_A", + "Key_B": [ + "value_B1", + "value_B2" + ] + } + ''' + + # Extract the “outputs” JSON snippet from the pretty-printed array + in_outputs = False + outputs = '' + + line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|') + for line in data['stdout_lines']: + match = line_regex.match(line) + if match: + if match.group(1) == 'outputs': + in_outputs = True + elif match.group(1) != '': + in_outputs = False + if in_outputs: + outputs += match.group(2) + + outputs = json.loads(outputs) + + # Revamp the “outputs” to put it in the form of a “Key: value” map + revamped_outputs = {} + for output in outputs: + revamped_outputs[output['output_key']] = output['output_value'] + + return revamped_outputs + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -245,5 +314,6 @@ class FilterModule(object): "oo_combine_key_value": self.oo_combine_key_value, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, - "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict + "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, + "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs } diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml new file mode 100644 index 000000000..a15ec749c --- /dev/null +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -0,0 +1,279 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster + +parameters: + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + num_masters: + type: number + label: Number of masters + description: Number of masters + + num_nodes: + type: number + label: Number of nodes + description: Number of nodes + + cidr: + type: string + label: CIDR + description: CIDR of the network of the cluster + + dns_nameservers: + type: comma_delimited_list + label: DNS nameservers list + description: List of DNS nameservers + + external_net: + type: string + label: External network + description: Name of the external network + default: external + + ssh_public_key: + type: string + label: SSH public key + description: SSH public key + hidden: true + + ssh_incoming: + type: string + label: Source of ssh connections + description: Source of legitimate ssh connections + default: 0.0.0.0/0 + + master_image: + type: string + label: Master image + description: Name of the image for the master servers + + node_image: + type: string + label: Node image + description: Name of the image for the node servers + + master_flavor: + type: string + label: Master flavor + description: Flavor of the master servers + + node_flavor: + type: string + label: Node flavor + description: Flavor of the node servers + +outputs: + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ nodes, private_ip ] } + + node_floating_ips: + description: 
Floating IPs of the nodes + value: { get_attr: [ nodes, floating_ip ] } + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: { get_param: cluster_id } + network: { get_resource: net } + cidr: { get_param: cidr } + dns_nameservers: { get_param: dns_nameservers } + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: { get_param: cluster_id } + external_gateway_info: + network: { get_param: external_net } + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + + keypair: + type: OS::Nova::KeyPair + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-keypair + params: + cluster_id: { get_param: cluster_id } + public_key: { get_param: ssh_public_key } + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + + masters: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_masters } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: cluster_id-k8s_type-%index% + params: + cluster_id: { get_param: cluster_id } + k8s_type: master + cluster_id: { get_param: cluster_id } + type: master + image: { get_param: master_image } + flavor: { get_param: master_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: 
openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + depends_on: interface + + nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_nodes } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: cluster_id-k8s_type-%index% + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + cluster_id: { get_param: cluster_id } + type: node + image: { get_param: node_image } + flavor: { get_param: node_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + depends_on: interface diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yml b/playbooks/openstack/openshift-cluster/files/heat_stack.yml deleted file mode 100644 index c5f95d87d..000000000 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yml +++ /dev/null @@ -1,149 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster - -parameters: - cluster-id: - type: string - label: Cluster ID - description: Identifier of the cluster - - network-prefix: - type: string - label: Network prefix - description: Prefix of the network objects - - cidr: - type: string - label: CIDR - description: CIDR of the network of the cluster - - dns-nameservers: - type: comma_delimited_list - label: DNS nameservers list - description: List of DNS nameservers - - external-net: - type: string - label: External network - description: Name of the external network - default: external - - ssh-incoming: - type: string - label: Source of ssh connections - description: Source of legitimate ssh connections - -resources: - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: network-prefix-net - params: - network-prefix: { get_param: network-prefix } - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: network-prefix-subnet - params: - network-prefix: { get_param: network-prefix } - network: { get_resource: net } - cidr: { get_param: cidr } - dns_nameservers: { get_param: dns-nameservers } - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - template: network-prefix-router - params: - network-prefix: { get_param: network-prefix } - external_gateway_info: - network: { get_param: external-net } - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: network-prefix-node-secgrp - params: - network-prefix: { get_param: network-prefix } - description: - str_replace: - template: Security group for cluster-id OpenShift cluster nodes - params: - cluster-id: { get_param: cluster-id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh-incoming } - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: 
network-prefix-master-secgrp - params: - network-prefix: { get_param: network-prefix } - description: - str_replace: - template: Security group for cluster-id OpenShift cluster master - params: - cluster-id: { get_param: cluster-id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh-incoming } - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml new file mode 100644 index 000000000..55f64211a --- /dev/null +++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml @@ -0,0 +1,123 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: { get_file: user-data } + user_data_format: RAW + metadata: + env: { get_param: cluster_id } + host-type: { get_param: type } + env-host-type: + str_template: + template: cluster_id-openshift-type + params: + cluster_id: { get_param: cluster_id } + type: { get_param: type } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 5c86ade3f..3cdd2ae4d 100644 --- 
a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -8,23 +8,105 @@ tasks: - fail: msg: "Deployment type not supported for OpenStack provider yet" - when: deployment_type in ['online', 'enterprise'] - - - include: tasks/configure_openstack.yml - - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" + when: deployment_type == 'online' + + # TODO: Write an Ansible module for dealing with HEAT stacks + # Dealing with the outputs is currently terrible + + - name: Check OpenStack stack + command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' + register: stack_show_result + changed_when: false + failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr + + - name: Create OpenStack Stack + command: 'heat stack-create -f {{ openstack_infra_heat_stack }} + -P cluster_id={{ cluster_id }} + -P dns_nameservers={{ openstack_network_dns | join(",") }} + -P cidr={{ openstack_network_cidr }} + -P ssh_incoming={{ openstack_ssh_access_from }} + -P num_masters={{ num_masters }} + -P num_nodes={{ num_nodes }} + -P master_image={{ deployment_vars[deployment_type].image }} + -P node_image={{ deployment_vars[deployment_type].image }} + -P master_flavor={{ openstack_flavor["master"] }} + -P node_flavor={{ openstack_flavor["node"] }} + -P ssh_public_key="{{ openstack_ssh_public_key }}" + openshift-ansible-{{ cluster_id }}-stack' + when: stack_show_result.rc == 1 + + - name: Update OpenStack Stack + command: 'heat stack-update -f {{ openstack_infra_heat_stack }} + -P cluster_id={{ cluster_id }} + -P dns_nameservers={{ openstack_network_dns | join(",") }} + -P cidr={{ openstack_network_cidr }} + -P ssh_incoming={{ openstack_ssh_access_from }} + -P num_masters={{ num_masters }} + -P num_nodes={{ num_nodes }} + -P master_image={{ deployment_vars[deployment_type].image }} + -P node_image={{ deployment_vars[deployment_type].image }} + -P master_flavor={{ openstack_flavor["master"] }} + -P node_flavor={{ openstack_flavor["node"] }} + -P ssh_public_key="{{ openstack_ssh_public_key }}" + openshift-ansible-{{ cluster_id }}-stack' + when: stack_show_result.rc == 0 + + - name: Wait for OpenStack Stack readiness + shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' + register: stack_show_status_result + until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] + retries: 30 + delay: 1 + failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] + + - name: Read OpenStack Stack outputs + command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' + register: stack_show_result + + - set_fact: + parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}" + + - name: Add new master instances groups and variables + add_host: + hostname: '{{ item[0] }}' + ansible_ssh_host: '{{ item[2] }}' + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id 
}}-openshift-master' + with_together: + - parsed_outputs.master_names + - parsed_outputs.master_ips + - parsed_outputs.master_floating_ips + + - name: Add new node instances groups and variables + add_host: + hostname: '{{ item[0] }}' + ansible_ssh_host: '{{ item[2] }}' + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node' + with_together: + - parsed_outputs.node_names + - parsed_outputs.node_ips + - parsed_outputs.node_floating_ips + + - name: Wait for ssh + wait_for: + host: '{{ item }}' + port: 22 + with_flattened: + - parsed_outputs.master_floating_ips + - parsed_outputs.node_floating_ips + + - name: Wait for user setup + command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup' + register: result + until: result.rc == 0 + retries: 30 + delay: 1 + with_flattened: + - parsed_outputs.master_floating_ips + - parsed_outputs.node_floating_ips - include: update.yml diff --git a/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml b/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index 1b9696aac..000000000 --- a/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: Get net id - shell: 'neutron net-show {{ openstack_network_prefix }}-net | awk "/\\/ {print \$4}"' - register: net_id_result - -- name: Launch instance(s) - nova_compute: - name: '{{ item }}' - image_name: '{{ deployment_vars[deployment_type].image.name | default(omit, true) }}' - image_id: '{{ deployment_vars[deployment_type].image.id | default(omit, true) }}' - flavor_ram: '{{ openstack_flavor[k8s_type].ram | default(omit, true) }}' - flavor_id: '{{ openstack_flavor[k8s_type].id | default(omit, true) }}' - flavor_include: '{{ openstack_flavor[k8s_type].include | default(omit, true) }}' - key_name: '{{ openstack_ssh_keypair }}' - security_groups: '{{ openstack_network_prefix }}-{{ k8s_type }}-secgrp' - nics: - - net-id: '{{ net_id_result.stdout }}' - user_data: "{{ lookup('file','files/user-data') }}" - meta: - env: '{{ cluster }}' - host-type: '{{ type }}' - env-host-type: '{{ cluster }}-openshift-{{ type }}' - floating_ip_pools: '{{ openstack_floating_ip_pools }}' - with_items: instances - register: nova_compute_result - -- name: Add new instances groups and variables - add_host: - hostname: '{{ item.item }}' - ansible_ssh_host: '{{ item.public_ip }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster }}, tag_host-type_{{ type }}, tag_env-host-type_{{ cluster }}-openshift-{{ type }}' - with_items: nova_compute_result.results - -- name: Wait for ssh - wait_for: - host: '{{ item.public_ip }}' - port: 22 - with_items: nova_compute_result.results - -- name: Wait for user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.item].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.item].ansible_ssh_user }} user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - with_items: nova_compute_result.results diff --git 
a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index 2f05f0992..fc4ec3c88 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -5,39 +5,18 @@ vars_files: - vars.yml tasks: - - set_fact: cluster_group=tag_env_{{ cluster_id }} - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[cluster_group] | default([]) - -- hosts: oo_hosts_to_terminate - -- hosts: localhost - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Retrieve the floating IPs - shell: "neutron floatingip-list | awk '/{{ hostvars[item].ansible_default_ipv4.address }}/ {print $2}'" - with_items: groups['oo_hosts_to_terminate'] | default([]) - register: floating_ips_to_delete - - - name: Terminate instance(s) - nova_compute: - name: "{{ hostvars[item].os_name }}" - state: absent - with_items: groups['oo_hosts_to_terminate'] | default([]) - - - name: Delete floating IPs - command: "neutron floatingip-delete {{ item.stdout }}" - with_items: floating_ips_to_delete.results | default([]) - - - name: Destroy the network - command: "heat stack-delete {{ openstack_network_prefix }}-stack" + - name: Delete the OpenStack Stack + command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack' register: stack_delete_result changed_when: stack_delete_result.rc == 0 failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout + + - name: Wait for the completion of the OpenStack Stack deletion + shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' + when: stack_delete_result.changed + register: stack_show_result + until: stack_show_result.stdout != 'DELETE_IN_PROGRESS' + retries: 60 + delay: 1 + failed_when: '"Stack not found" not in stack_show_result.stderr and + stack_show_result.stdout != "DELETE_COMPLETE"' diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index 1ae7c17d2..d077a6ced 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -1,6 +1,6 @@ --- openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | - default('files/heat_stack.yml', True) }}" + default('files/heat_stack.yaml', True) }}" openstack_network_prefix: "{{ lookup('oo_option', 'network_prefix' ) | default('openshift-ansible-'+cluster_id, True) }}" openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) | @@ -18,31 +18,19 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') | default('0.0.0.0/0', True) }}" openstack_flavor: - master: - ram: "{{ lookup('oo_option', 'master_flavor_ram' ) | default(2048, True) }}" - id: "{{ lookup('oo_option', 'master_flavor_id' ) | default(True) }}" - include: "{{ lookup('oo_option', 'master_flavor_include') | default(True) }}" - node: - ram: "{{ lookup('oo_option', 'node_flavor_ram' ) | default(4096, True) }}" - id: "{{ lookup('oo_option', 'node_flavor_id' ) | default(True) }}" - include: "{{ lookup('oo_option', 'node_flavor_include' ) | default(True) }}" + master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}" + node: "{{ lookup('oo_option', 'node_flavor' ) | 
default('m1.medium', True) }}" deployment_vars: origin: - image: - name: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}" - id: + image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}" ssh_user: openshift sudo: yes online: image: - name: - id: ssh_user: root sudo: no enterprise: - image: - name: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}" - id: + image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}" ssh_user: openshift sudo: yes -- cgit v1.2.3 From fb4083bb920d193c2f292b49f370667029c317ba Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Fri, 5 Jun 2015 17:44:33 +0200 Subject: Implement RHEL subscription for enterprise deployment type --- playbooks/aws/openshift-cluster/terminate.yml | 9 +++++++ .../update_repos_and_packages.yml | 5 ++++ playbooks/gce/openshift-cluster/terminate.yml | 15 +++++++++-- playbooks/libvirt/openshift-cluster/launch.yml | 2 +- playbooks/libvirt/openshift-cluster/terminate.yml | 17 +++++++++++++ playbooks/libvirt/openshift-cluster/vars.yml | 9 ++++--- .../openstack/openshift-cluster/terminate.yml | 25 +++++++++++++++++++ roles/rhel_subscribe/tasks/enterprise.yml | 5 ++++ roles/rhel_subscribe/tasks/main.yml | 29 ++++++++++++++++++++++ roles/rhel_unsubscribe/tasks/main.yml | 5 ++++ 10 files changed, 115 insertions(+), 6 deletions(-) create mode 100644 roles/rhel_subscribe/tasks/enterprise.yml create mode 100644 roles/rhel_subscribe/tasks/main.yml create mode 100644 roles/rhel_unsubscribe/tasks/main.yml (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 9c3703aba..3a08ed966 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -13,6 +13,15 @@ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[scratch_group] | default([]) | difference(['localhost']) +- name: Unsubscribe VMs + hosts: oo_hosts_to_terminate + roles: + - role: rhel_unsubscribe + when: deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + - name: Terminate instances hosts: localhost connection: local diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index e92c6f1ee..190e2d862 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -3,5 +3,10 @@ vars: openshift_deployment_type: "{{ deployment_type }}" roles: + - role: rhel_subscribe + when: deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] - openshift_repos - os_update_latest diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index abe6a4c95..098b0df73 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -8,7 +8,7 @@ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node - add_host: name: "{{ item }}" - groups: oo_nodes_to_terminate + groups: oo_hosts_to_terminate, oo_nodes_to_terminate ansible_ssh_user: "{{ 
deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) @@ -16,11 +16,22 @@ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master - add_host: name: "{{ item }}" - groups: oo_masters_to_terminate + groups: oo_hosts_to_terminate, oo_masters_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) +- name: Unsubscribe VMs + hosts: oo_hosts_to_terminate + vars_files: + - vars.yml + roles: + - role: rhel_unsubscribe + when: deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + - include: ../openshift-node/terminate.yml vars: gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index a7ddc1e7e..6badcb325 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -13,7 +13,7 @@ image_name: "{{ deployment_vars[deployment_type].image.name }}" tasks: - fail: msg="Deployment type not supported for libvirt provider yet" - when: deployment_type in ['online', 'enterprise'] + when: deployment_type == 'online' - include: tasks/configure_libvirt.yml diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index b173a09dd..8f00812a9 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -15,6 +15,23 @@ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[cluster_group] | default([]) +- name: Unsubscribe VMs + hosts: oo_hosts_to_terminate + vars_files: + - vars.yml + roles: + - role: rhel_unsubscribe + when: deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + +- name: Terminate instance(s) + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: - name: Destroy VMs virt: name: '{{ item[0] }}' diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index e3c8cd8d0..c77a0797e 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -24,9 +24,12 @@ deployment_vars: sudo: no enterprise: image: - url: - name: - sha256: + url: "{{ lookup('oo_option', 'image_url') | + default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}" + name: "{{ lookup('oo_option', 'image_name') | + default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}" + sha256: "{{ lookup('oo_option', 'image_sha256') | + default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}" ssh_user: openshift sudo: yes # origin: diff --git 
a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index fc4ec3c88..62df2be73 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -1,5 +1,30 @@ - name: Terminate instance(s) hosts: localhost + connection: local + gather_facts: no + vars_files: + - vars.yml + tasks: + - set_fact: cluster_group=tag_env_{{ cluster_id }} + - add_host: + name: "{{ item }}" + groups: oo_hosts_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[cluster_group] | default([]) + +- name: Unsubscribe VMs + hosts: oo_hosts_to_terminate + vars_files: + - vars.yml + roles: + - role: rhel_unsubscribe + when: deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + +- hosts: localhost connection: local gather_facts: no vars_files: diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml new file mode 100644 index 000000000..fc4d44745 --- /dev/null +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -0,0 +1,5 @@ +--- +- name: Enable RHEL repositories + command: subscription-manager repos \ + --enable="rhel-7-server-rpms" \ + --enable="rhel-7-server-ose-3.0-rpms" diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml new file mode 100644 index 000000000..8fb2fc042 --- /dev/null +++ b/roles/rhel_subscribe/tasks/main.yml @@ -0,0 +1,29 @@ +--- +# TODO: Enhance redhat_subscription module +# to make it able to attach to a pool +# to make it able to enable repositories + +- set_fact: + rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}" + rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}" + +- fail: + msg: "This role is only supported for Red Hat hosts" + when: ansible_distribution != 'RedHat' + +- fail: + msg: Either rsub_user or the rhel_subscription_user env variable are required for this role. + when: rhel_subscription_user is not defined + +- fail: + msg: Either rsub_pass or the rhel_subscription_pass env variable are required for this role. 
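The rhel_subscription_user / rhel_subscription_pass values consumed by these tasks are resolved through the `oo_option` lookup, with the `rhsub_user` / `rhsub_pass` inventory variables as fallbacks. A minimal sketch of driving an enterprise install with them, assuming `oo_option` picks the values up from the environment and that your checkout of `bin/cluster` accepts the provider and `--deployment-type` arguments shown in the READMEs:

```sh
# Hypothetical credentials; set rhel_skip_subscription=yes (or rhsub_skip)
# to bypass subscription handling on already-registered hosts.
export rhel_subscription_user='example-user'
export rhel_subscription_pass='example-password'
bin/cluster create openstack --deployment-type=enterprise
```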
+ when: rhel_subscription_pass is not defined + +- name: RedHat subscriptions + redhat_subscription: + username: "{{ rhel_subscription_user }}" + password: "{{ rhel_subscription_pass }}" + autosubscribe: yes + +- include: enterprise.yml + when: deployment_type == 'enterprise' diff --git a/roles/rhel_unsubscribe/tasks/main.yml b/roles/rhel_unsubscribe/tasks/main.yml new file mode 100644 index 000000000..2aeb09d83 --- /dev/null +++ b/roles/rhel_unsubscribe/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: Remove RedHat subscriptions + redhat_subscription: + state: absent + when: ansible_distribution == "RedHat" -- cgit v1.2.3 From 6b4282004a4331d9db0e0ab857c96d83a738d82c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 14 Jul 2015 14:48:38 -0400 Subject: Initial HA master - Ability to specify multiple masters - configures the CA only a single time on the first master - creates and distributes additional certs for additional master hosts - Depending on the status of openshift_master_cluster_defer_ha (defaults to False) one of two actions are taken when multiple masters are defined 1. If openshift_master_cluster_defer_ha is true a. Certs/configs for all masters are deployed b. openshift-master service is only started and enabled on the master c. HA configuration is expected to be handled by the user manually after the completion of the playbook run. 2. If oepnshift_master_cluster_defer_ha is false or undefined a. Certs/configs for all masters are deployed b. a Pacemaker/RHEL HA cluster is configured i. VIPs are configured based on the values of openshift_master_cluster_vip and openshift_master_cluster_plublic_vip ii. The openshift-master service is configured as an active/passive cluster service --- inventory/byo/hosts.example | 20 +++++++++- playbooks/common/openshift-master/config.yml | 29 ++++++++++++--- roles/openshift_facts/library/openshift_facts.py | 42 ++++++++++++++++++--- roles/openshift_master/defaults/main.yml | 6 +++ roles/openshift_master/handlers/main.yml | 1 + roles/openshift_master/tasks/main.yml | 21 +++++++++++ roles/openshift_master_ca/tasks/main.yml | 2 +- roles/openshift_master_certificates/tasks/main.yml | 16 ++++++-- roles/openshift_master_certificates/vars/main.yml | 3 -- roles/openshift_master_cluster/README.md | 34 +++++++++++++++++ roles/openshift_master_cluster/meta/main.yml | 16 ++++++++ roles/openshift_master_cluster/tasks/configure.yml | 43 ++++++++++++++++++++++ .../tasks/configure_deferred.yml | 8 ++++ roles/openshift_master_cluster/tasks/main.yml | 13 +++++++ roles/openshift_node_certificates/tasks/main.yml | 2 +- 15 files changed, 234 insertions(+), 22 deletions(-) create mode 100644 roles/openshift_master_cluster/README.md create mode 100644 roles/openshift_master_cluster/meta/main.yml create mode 100644 roles/openshift_master_cluster/tasks/configure.yml create mode 100644 roles/openshift_master_cluster/tasks/configure_deferred.yml create mode 100644 roles/openshift_master_cluster/tasks/main.yml (limited to 'playbooks') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 41216dd89..56f4da5a2 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -38,14 +38,30 @@ deployment_type=enterprise # Allow all auth #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] +# master cluster ha variables using pacemaker or RHEL HA +#openshift_master_cluster_password=openshift_cluster +#openshift_master_cluster_vip=192.168.133.25 
+#openshift_master_cluster_public_vip=192.168.133.25 +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# master cluster ha variables when using a different HA solution +# For installation the value of openshift_master_cluster_hostname must resolve +# to the first master defined in the inventory. +# The HA solution must be manually configured after installation and must ensure +# that openshift-master is running on a single master host. +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_defer_ha=True + # host group for masters [masters] -ose3-master-ansible.test.example.com +ose3-master[1:3]-ansible.test.example.com [etcd] ose3-etcd[1:3]-ansible.test.example.com # host group for nodes [nodes] -ose3-master-ansible.test.example.com openshift_scheduleable=False +ose3-master[1:3]-ansible.test.example.com openshift_scheduleable=False ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 3956128e1..904ad2dab 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -27,6 +27,9 @@ api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" + cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" + cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}" console_path: "{{ openshift_master_console_path | default(None) }}" console_port: "{{ openshift_master_console_port | default(None) }}" console_url: "{{ openshift_master_console_url | default(None) }}" @@ -152,16 +155,26 @@ roles: - openshift_master_certificates post_tasks: + - name: Remove generated etcd client certs when using external etcd + file: + path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}" + state: absent + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config + with_nested: + - masters_needing_certs + - - master.etcd-client.crt + - master.etcd-client.key + - name: Create a tarball of the master certs command: > - tar -czvf {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz - -C {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }} . + tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz + -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} . 
args: - creates: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz" + creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz" with_items: masters_needing_certs - name: Retrieve the master cert tarball from the master fetch: - src: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz" + src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz" dest: "{{ sync_tmpdir }}/" flat: yes fail_on_missing: yes @@ -172,6 +185,7 @@ hosts: oo_masters_to_config vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -192,9 +206,14 @@ group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} changed_when: False -- name: Deploy OpenShift examples +- name: Additional master configuration hosts: oo_first_master + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}" roles: + - role: openshift_master_cluster + when: openshift_master_ha | bool - openshift_examples # Additional instance config for online deployments diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 727861b07..d733639c3 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -367,9 +367,11 @@ def set_url_facts_if_unset(facts): console_path = facts['master']['console_path'] etcd_use_ssl = facts['master']['etcd_use_ssl'] etcd_hosts = facts['master']['etcd_hosts'] - etcd_port = facts['master']['etcd_port'], + etcd_port = facts['master']['etcd_port'] hostname = facts['common']['hostname'] public_hostname = facts['common']['public_hostname'] + cluster_hostname = facts['master'].get('cluster_hostname') + cluster_public_hostname = facts['master'].get('cluster_public_hostname') if 'etcd_urls' not in facts['master']: etcd_urls = [] @@ -384,24 +386,51 @@ def set_url_facts_if_unset(facts): etcd_port)] facts['master']['etcd_urls'] = etcd_urls if 'api_url' not in facts['master']: - facts['master']['api_url'] = format_url(api_use_ssl, hostname, + api_hostname = cluster_hostname if cluster_hostname else hostname + facts['master']['api_url'] = format_url(api_use_ssl, api_hostname, api_port) if 'public_api_url' not in facts['master']: + api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname facts['master']['public_api_url'] = format_url(api_use_ssl, - public_hostname, + api_public_hostname, api_port) if 'console_url' not in facts['master']: + console_hostname = cluster_hostname if cluster_hostname else hostname facts['master']['console_url'] = format_url(console_use_ssl, - hostname, + console_hostname, console_port, console_path) if 'public_console_url' not in facts['master']: + console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname facts['master']['public_console_url'] = format_url(console_use_ssl, - public_hostname, + console_public_hostname, console_port, console_path) return facts +def set_aggregate_facts(facts): + """ Set aggregate facts + + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with aggregated facts + """ + all_hostnames = set() + if 'common' in facts: + all_hostnames.add(facts['common']['hostname']) + all_hostnames.add(facts['common']['public_hostname']) + + if 'master' in 
facts: + if 'cluster_hostname' in facts['master']: + all_hostnames.add(facts['master']['cluster_hostname']) + if 'cluster_public_hostname' in facts['master']: + all_hostnames.add(facts['master']['cluster_public_hostname']) + + facts['common']['all_hostnames'] = list(all_hostnames) + + return facts + def set_sdn_facts_if_unset(facts): """ Set sdn facts if not already present in facts dict @@ -675,6 +704,7 @@ class OpenShiftFacts(object): facts = set_identity_providers_if_unset(facts) facts = set_registry_url_if_unset(facts) facts = set_sdn_facts_if_unset(facts) + facts = set_aggregate_facts(facts) return dict(openshift=facts) def get_defaults(self, roles): @@ -713,7 +743,7 @@ class OpenShiftFacts(object): session_name='ssn', session_secrets_file='', access_token_max_seconds=86400, auth_token_max_seconds=500, - oauth_grant_method='auto') + oauth_grant_method='auto', cluster_defer_ha=False) defaults['master'] = master if 'node' in roles: diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 11195e83e..ca8860099 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -15,6 +15,12 @@ os_firewall_allow: port: 24224/tcp - service: Fluentd td-agent udp port: 24224/udp +- service: pcsd + port: 2224/tcp +- service: Corosync UDP + port: 5404/udp +- service: Corosync UDP + port: 5405/udp os_firewall_deny: - service: OpenShift api http port: 8080/tcp diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 6fd4dfb51..d57f9a4ea 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,3 +1,4 @@ --- - name: restart openshift-master service: name=openshift-master state=restarted + when: not openshift_master_ha diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 02905f32d..2311568dd 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -8,6 +8,10 @@ - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods when: openshift_master_oauth_grant_method is defined +- fail: + msg: "openshift_master_cluster_password must be set for multi-master installations" + when: openshift_master_ha and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined + - name: Install OpenShift Master package yum: pkg=openshift-master state=present register: install_result @@ -16,6 +20,9 @@ openshift_facts: role: master local_facts: + cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" + cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" + cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}" debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}" api_port: "{{ openshift_master_api_port | default(None) }}" api_url: "{{ openshift_master_api_url | default(None) }}" @@ -114,12 +121,26 @@ - name: Start and enable openshift-master service: name=openshift-master enabled=yes state=started + when: not openshift_master_ha register: start_result - name: pause to prevent service restart from interfering with bootstrapping pause: seconds=30 when: start_result | changed +- name: Install cluster packagese + yum: pkg=pcs state=present + when: openshift_master_ha and not openshift.master.cluster_defer_ha | bool + register: install_result + +- name: Start and enable cluster service + service: 
name=pcsd enabled=yes state=started + when: openshift_master_ha and not openshift.master.cluster_defer_ha | bool + +- name: Set the cluster user password + shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster + when: install_result | changed + - name: Create the OpenShift client config dir(s) file: path: "~{{ item }}/.kube" diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index 8163ecd7f..03eb7e15f 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -14,7 +14,7 @@ - name: Create the master certificates if they do not already exist command: > {{ openshift.common.admin_binary }} create-master-certs - --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }} + --hostnames={{ openshift.common.all_hostnames | join(',') }} --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_master_config_dir }} --overwrite=false diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index b5a3f8e40..297d53bcd 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -7,14 +7,20 @@ with_items: masters_needing_certs - file: - src: "{{ openshift_master_ca_cert }}" - dest: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/ca.crt" - with_items: masters_needing_certs + src: "{{ openshift_master_config_dir }}/{{ item.1 }}" + dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}" + state: hard + with_nested: + - masters_needing_certs + - - ca.crt + - ca.key + - ca.serial.txt + - name: Create the master certificates if they do not already exist command: > {{ openshift.common.admin_binary }} create-master-certs - --hostnames={{ item.openshift.common.hostname }},{{ item.openshift.common.public_hostname }} + --hostnames={{ item.openshift.common.all_hostnames | join(',') }} --master={{ item.openshift.master.api_url }} --public-master={{ item.openshift.master.public_api_url }} --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }} @@ -22,3 +28,5 @@ args: creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt" with_items: masters_needing_certs + + diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml index 6e577b13b..6214f7918 100644 --- a/roles/openshift_master_certificates/vars/main.yml +++ b/roles/openshift_master_certificates/vars/main.yml @@ -1,6 +1,3 @@ --- openshift_generated_configs_dir: /etc/openshift/generated-configs openshift_master_config_dir: /etc/openshift/master -openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" -openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" -openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md new file mode 100644 index 000000000..f150981fa --- /dev/null +++ b/roles/openshift_master_cluster/README.md @@ -0,0 +1,34 @@ +OpenShift Master Cluster +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information 
+------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml new file mode 100644 index 000000000..f3236e850 --- /dev/null +++ b/roles/openshift_master_cluster/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml new file mode 100644 index 000000000..9a2546bca --- /dev/null +++ b/roles/openshift_master_cluster/tasks/configure.yml @@ -0,0 +1,43 @@ +--- +- fail: + msg: This role requires that openshift_master_cluster_vip is set + when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip +- fail: + msg: This role requires that openshift_master_cluster_public_vip is set + when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip + +- name: Authenticate to the cluster + command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }} + +- name: Create the cluster + command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }} + +- name: Start the cluster + command: pcs cluster start --all + +- name: Enable the cluster on all nodes + command: pcs cluster enable --all + +- name: Set default resource stickiness + command: pcs resource defaults resource-stickiness=100 + +- name: Add the cluster VIP resource + command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group openshift-master + +- name: Add the cluster public VIP resource + command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group openshift-master + when: openshift_master_cluster_public_vip != openshift_master_cluster_vip + +- name: Add the cluster openshift-master service resource + command: pcs resource create master systemd:openshift-master --group openshift-master + +- name: Disable stonith + command: pcs property set stonith-enabled=false + +# TODO: handle case where api port is not 8443 +- name: Wait for the clustered master service to be available + wait_for: + host: "{{ openshift_master_cluster_vip }}" + port: 8443 + state: started + timeout: 300 diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml new file mode 100644 index 000000000..a80b6c5b4 --- /dev/null +++ b/roles/openshift_master_cluster/tasks/configure_deferred.yml @@ -0,0 +1,8 @@ +--- +- debug: msg="Deferring config" + +- name: Start and enable openshift-master + service: + name: openshift-master + state: started + enabled: yes diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml new file mode 100644 index 000000000..315947183 --- /dev/null +++ b/roles/openshift_master_cluster/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Test if cluster is already configured + command: pcs status + register: pcs_status + changed_when: false + failed_when: false + when: not openshift.master.cluster_defer_ha | bool + +- include: configure.yml + when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr" + +- include: configure_deferred.yml + when: 
openshift.master.cluster_defer_ha | bool diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 64a799dfb..c9f02aaf0 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -25,7 +25,7 @@ command: > {{ openshift.common.admin_binary }} create-server-cert --cert=server.crt --key=server.key --overwrite=true - --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }} + --hostnames={{ openshift.common.all_hostnames |join(",") }} --signer-cert={{ openshift_master_ca_cert }} --signer-key={{ openshift_master_ca_key }} --signer-serial={{ openshift_master_ca_serial }} -- cgit v1.2.3 From bce46b21707c399c05893aecc89316c70c97fada Mon Sep 17 00:00:00 2001 From: Patrick Tescher Date: Mon, 20 Jul 2015 12:18:22 -0700 Subject: Switch to Centos and enable SDN on origin. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fedora 21 doesn’t seem to have docker 1.6.2. Centos does. Also without SDN networking was not working on AWS. --- README_AWS.md | 39 ++++++++++++++++++++++++++++++++ playbooks/aws/openshift-cluster/vars.yml | 6 ++--- 2 files changed, 42 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/README_AWS.md b/README_AWS.md index 0e3128a92..69aa65126 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -22,6 +22,27 @@ Note: You must source this file before running any Ansible commands. Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials, see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format. +Subscribe to CentOS +------------------- + +1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW) + + +Set up Security Group +--------------------- +By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` for SDN. +You may also want to allow access from the outside world on the following ports: + +``` +• 22 - ssh +• 80 - Web Apps +• 443 - Web Apps (https) +• 4789 - SDN / VXLAN +• 8443 - Openshift Console +• 10250 - kubelet +``` + + (Optional) Setup your $HOME/.ssh/config file ------------------------------------------- In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config` @@ -130,3 +151,21 @@ The --deployment-type flag can be passed to bin/cluster to specify the deploymen bin/cluster create aws --deployment-type=online ``` Note: If no deployment type is specified, then the default is origin. 
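Before running the post-ansible steps this change adds below, it can help to sanity-check the install from the master. A small sketch, assuming the admin kubeconfig lands in the default master config directory used by these playbooks; the exact path `/etc/openshift/master/admin.kubeconfig` is an assumption, not something this patch sets:

```sh
# On the master, confirm the API is up and the nodes have registered
# before creating the router and registry shown below.
sudo oc get nodes --config=/etc/openshift/master/admin.kubeconfig
```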
+ + +## Post-ansible steps +Create the default router +------------------------- +On the master host: +```sh +oadm router --create=true \ + --credentials=/etc/openshift/master/openshift-router.kubeconfig +``` + +Create the default docker-registry +---------------------------------- +On the master host: +```sh +oadm registry --create=true \ + --credentials=/etc/openshift/master/openshift-registry.kubeconfig +``` \ No newline at end of file diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index 07e453f89..aad95ef48 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -1,11 +1,11 @@ --- deployment_vars: origin: - # fedora, since centos requires marketplace - image: ami-acd999c4 + # centos-7, requires marketplace + image: ami-96a818fe image_name: region: us-east-1 - ssh_user: fedora + ssh_user: centos sudo: yes keypair: libra type: m3.large -- cgit v1.2.3 From c4cca1d7184ae859706b5854a04f18095c12f1d6 Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Mon, 20 Jul 2015 16:20:12 -0400 Subject: Infra node support --- bin/cluster | 5 +++- filter_plugins/oo_filters.py | 1 - playbooks/aws/openshift-cluster/launch.yml | 17 +++++++++++ .../openshift-cluster/tasks/launch_instances.yml | 35 +++++++++++++++++++++- playbooks/aws/openshift-cluster/terminate.yml | 1 + .../aws/openshift-cluster/vars.online.int.yml | 10 +++++-- .../aws/openshift-cluster/vars.online.prod.yml | 10 +++++-- .../aws/openshift-cluster/vars.online.stage.yml | 10 +++++-- .../set_node_launch_facts_tasks.yml | 8 +++-- playbooks/common/openshift-node/config.yml | 10 +++---- playbooks/gce/openshift-cluster/launch.yml | 20 +++++++++++-- .../openshift-cluster/tasks/launch_instances.yml | 1 + playbooks/libvirt/openshift-cluster/launch.yml | 16 ++++++++++ playbooks/openstack/openshift-cluster/launch.yml | 22 ++++++++++++-- playbooks/openstack/openshift-cluster/vars.yml | 1 + 15 files changed, 145 insertions(+), 22 deletions(-) (limited to 'playbooks') diff --git a/bin/cluster b/bin/cluster index 746c0349a..7eb4a4448 100755 --- a/bin/cluster +++ b/bin/cluster @@ -51,6 +51,7 @@ class Cluster(object): env['num_masters'] = args.masters env['num_nodes'] = args.nodes + env['num_infra'] = args.infra env['num_etcd'] = args.etcd return self.action(args, inventory, env, playbook) @@ -149,7 +150,7 @@ class Cluster(object): boto_conf_files = ['~/.aws/credentials', '~/.boto'] conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf)) - boto_configs = [ conf for conf in boto_conf_files if conf_exists(conf)] + boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)] if len(key_missing) > 0 and len(boto_configs) == 0: raise ValueError("PROVIDER aws requires {} environment variable(s). 
See README_AWS.md".format(missing)) @@ -262,6 +263,8 @@ if __name__ == '__main__': help='number of masters to create in cluster') create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster') + create_parser.add_argument('-i', '--infra', default=1, type=int, + help='number of infra nodes to create in cluster') create_parser.add_argument('-e', '--etcd', default=0, type=int, help='number of external etcd hosts to create in cluster') create_parser.set_defaults(func=cluster.create) diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 18229631c..88e86f67c 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -50,7 +50,6 @@ class FilterModule(object): return [item for sublist in data for item in sublist] - @staticmethod def oo_collect(data, attribute=None, filters=None): ''' This takes a list of dict and collects all attributes specified into a diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 5db87fa90..a89275597 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -17,6 +17,7 @@ instances: "{{ etcd_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: "default" - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml - include: tasks/launch_instances.yml @@ -24,13 +25,29 @@ instances: "{{ master_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: "default" - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "compute" + count: "{{ num_nodes }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "infra" + count: "{{ num_infra }}" - include: tasks/launch_instances.yml vars: instances: "{{ node_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" - add_host: name: "{{ master_names.0 }}" diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 25a87aaf6..92155582e 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -5,6 +5,7 @@ env: "{{ cluster }}" env_host_type: "{{ cluster }}-openshift-{{ type }}" host_type: "{{ type }}" + sub_host_type: "{{ g_sub_host_type }}" - set_fact: ec2_region: "{{ lookup('env', 'ec2_region') @@ -34,6 +35,35 @@ ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip') | default(deployment_vars[deployment_type].assign_public_ip, true) }}" when: ec2_assign_public_ip is not defined + +- set_fact: + ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}" + ec2_security_groups: "{{ ec2_master_security_groups + | default(deployment_vars[deployment_type].security_groups, true) }}" + when: host_type == "master" and sub_host_type == "default" + +- set_fact: + ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}" + ec2_security_groups: "{{ ec2_etcd_security_groups + | default(deployment_vars[deployment_type].security_groups, true)}}" + when: host_type == "etcd" and sub_host_type == "default" + +- set_fact: + ec2_instance_type: "{{ 
ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}" + ec2_security_groups: "{{ ec2_infra_security_groups + | default(deployment_vars[deployment_type].security_groups, true) }}" + when: host_type == "node" and sub_host_type == "infra" + +- set_fact: + ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}" + ec2_security_groups: "{{ ec2_node_security_groups + | default(deployment_vars[deployment_type].security_groups, true) }}" + when: host_type == "node" and sub_host_type == "compute" + +- set_fact: + ec2_instance_type: "{{ lookup('env', 'ec2_instance_type') + | default(deployment_vars[deployment_type].type, true) }}" + when: ec2_instance_type is not defined - set_fact: ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" @@ -99,6 +129,7 @@ env: "{{ env }}" host-type: "{{ host_type }}" env-host-type: "{{ env_host_type }}" + sub-host-type: "{{ sub_host_type }}" volumes: "{{ volumes }}" register: ec2 @@ -112,7 +143,9 @@ Name: "{{ item.0 }}" - set_fact: - instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }} + instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }}, + tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, + tag_sub-host-type_{{ sub_host_type }}" - name: Add new instances groups and variables add_host: diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 3a08ed966..77287cad0 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -37,6 +37,7 @@ env: "{{ item['ec2_tag_env'] }}" host-type: "{{ item['ec2_tag_host-type'] }}" env-host-type: "{{ item['ec2_tag_env-host-type'] }}" + sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}" with_items: host_vars when: "'oo_hosts_to_terminate' in groups" diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml index e406a7635..fc8b8d2d2 100644 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -3,7 +3,13 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_instance_type: m3.large -ec2_security_groups: [ 'int-v3' ] +ec2_master_instance_type: m3.large +ec2_master_security_groups: [ 'integration', 'integration-master' ] +ec2_infra_instance_type: m3.large +ec2_infra_security_groups: [ 'integration', 'integration-infra' ] +ec2_node_instance_type: m3.large +ec2_node_security_groups: [ 'integration', 'integration-node' ] +ec2_etcd_instance_type: m3.large +ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ] ec2_vpc_subnet: subnet-987c0def ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml index e406a7635..f68d41fc4 100644 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -3,7 +3,13 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_instance_type: m3.large -ec2_security_groups: [ 'int-v3' ] +ec2_master_instance_type: m3.large +ec2_master_security_groups: [ 'production', 'production-master' ] +ec2_infra_instance_type: m3.large 
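With the new `--infra` option wired through `bin/cluster` and these per-type instance sizes and security groups, a launch can size compute and infra capacity separately. A sketch of one such invocation, assuming the pre-existing masters flag and any cluster-id argument that your checkout of `bin/cluster` already expects:

```sh
# Two compute nodes plus one infra node, using the per-type EC2 settings
# from the vars.online.*.yml files in this patch.
bin/cluster create aws -n 2 -i 1 --deployment-type=online
```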
+ec2_infra_security_groups: [ 'production', 'production-infra' ] +ec2_node_instance_type: m3.large +ec2_node_security_groups: [ 'production', 'production-node' ] +ec2_etcd_instance_type: m3.large +ec2_etcd_security_groups: [ 'production', 'production-etcd' ] ec2_vpc_subnet: subnet-987c0def ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml index e406a7635..ce9869fcd 100644 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -3,7 +3,13 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_instance_type: m3.large -ec2_security_groups: [ 'int-v3' ] +ec2_master_instance_type: m3.large +ec2_master_security_groups: [ 'stage', 'stage-master' ] +ec2_infra_instance_type: m3.large +ec2_infra_security_groups: [ 'stage', 'stage-infra' ] +ec2_node_instance_type: m3.large +ec2_node_security_groups: [ 'stage', 'stage-node' ] +ec2_etcd_instance_type: m3.large +ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ] ec2_vpc_subnet: subnet-987c0def ec2_assign_public_ip: yes diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml index 96e1a9a63..278942f8b 100644 --- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml @@ -1,11 +1,13 @@ --- -- set_fact: k8s_type="node" +- set_fact: k8s_type=node +- set_fact: sub_host_type="{{ type }}" +- set_fact: number_nodes="{{ count }}" - name: Generate node instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" register: node_names_output - with_sequence: count={{ num_nodes }} + with_sequence: count={{ number_nodes }} - set_fact: node_names: "{{ node_names_output.results | default([]) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index bd35008b8..6ef375bbb 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -129,16 +129,14 @@ openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) | oo_collect('openshift.common.hostname') }}" - openshift_unscheduleable_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config'] - | default([])) - | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" + openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([])) + | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" pre_tasks: - set_fact: openshift_scheduleable_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config'] - | default([])) + | oo_select_keys(groups['oo_nodes_to_config'] | default([])) | oo_collect('openshift.common.hostname') | difference(openshift_unscheduleable_nodes) }}" + roles: - openshift_manage_node diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 35737f03d..7a3b80da0 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -15,17 +15,33 @@ instances: "{{ master_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: 
"default" - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "compute" + count: "{{ num_nodes }}" - include: tasks/launch_instances.yml vars: instances: "{{ node_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "infra" + count: "{{ num_infra }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ infra_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" - set_fact: - a_master: "{{ master_names[0] }}" - - add_host: name={{ a_master }} groups=service_master + a_infra: "{{ infra_names[0] }}" + - add_host: name={{ a_infra }} groups=service_master - include: update.yml diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 9a9848f05..6307ecc27 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -14,6 +14,7 @@ - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} - env-{{ cluster }} - host-type-{{ type }} + - sub-host-type-{{ sub_host_type }} - env-host-type-{{ cluster }}-openshift-{{ type }} register: gce diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 6badcb325..6630fa27d 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -23,13 +23,29 @@ instances: "{{ master_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: "default" - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "compute" + count: "{{ num_nodes }}" - include: tasks/launch_instances.yml vars: instances: "{{ node_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "infra" + count: "{{ num_infra }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ infra_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" - include: update.yml diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 3cdd2ae4d..d41448dc0 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -27,10 +27,13 @@ -P ssh_incoming={{ openstack_ssh_access_from }} -P num_masters={{ num_masters }} -P num_nodes={{ num_nodes }} + -P num_infra={{ num_infra }} -P master_image={{ deployment_vars[deployment_type].image }} -P node_image={{ deployment_vars[deployment_type].image }} + -P infra_image={{ deployment_vars[deployment_type].image }} -P master_flavor={{ openstack_flavor["master"] }} -P node_flavor={{ openstack_flavor["node"] }} + -P infra_flavor={{ openstack_flavor["infra"] }} -P ssh_public_key="{{ openstack_ssh_public_key }}" openshift-ansible-{{ cluster_id }}-stack' when: stack_show_result.rc == 1 @@ -43,10 +46,13 @@ -P ssh_incoming={{ openstack_ssh_access_from }} -P num_masters={{ num_masters }} -P num_nodes={{ num_nodes }} + -P num_infra={{ num_infra }} -P master_image={{ deployment_vars[deployment_type].image }} -P node_image={{ deployment_vars[deployment_type].image }} + -P infra_image={{ 
deployment_vars[deployment_type].image }} -P master_flavor={{ openstack_flavor["master"] }} -P node_flavor={{ openstack_flavor["node"] }} + -P infra_flavor={{ openstack_flavor["infra"] }} -P ssh_public_key="{{ openstack_ssh_public_key }}" openshift-ansible-{{ cluster_id }}-stack' when: stack_show_result.rc == 0 @@ -72,7 +78,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default' with_together: - parsed_outputs.master_names - parsed_outputs.master_ips @@ -84,12 +90,24 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_node' with_together: - parsed_outputs.node_names - parsed_outputs.node_ips - parsed_outputs.node_floating_ips + - name: Add new infra instances groups and variables + add_host: + hostname: '{{ item[0] }}' + ansible_ssh_host: '{{ item[2] }}' + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra' + with_together: + - parsed_outputs.infra_names + - parsed_outputs.infra_ips + - parsed_outputs.infra_floating_ips + - name: Wait for ssh wait_for: host: '{{ item }}' diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index d077a6ced..43e25f2e6 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -19,6 +19,7 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') | default('0.0.0.0/0', True) }}" openstack_flavor: master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}" + infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}" node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}" deployment_vars: -- cgit v1.2.3 From 2a6fc886cc080ec1344d9ad4767f0fcde7ba3442 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Fri, 24 Jul 2015 10:52:29 -0400 Subject: Adding initial zabbix setup --- filter_plugins/oo_filters.py | 10 - filter_plugins/oo_zabbix_filters.py | 79 +++++++ playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 66 ++++++ playbooks/adhoc/zabbix_setup/create_app.yml | 34 +++ .../adhoc/zabbix_setup/create_application.yml | 18 ++ playbooks/adhoc/zabbix_setup/create_template.yml | 59 +++++ playbooks/adhoc/zabbix_setup/filter_plugins | 1 + playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 41 ++++ .../adhoc/zabbix_setup/vars/template_heartbeat.yml | 33 +++ .../adhoc/zabbix_setup/vars/template_host.yml | 27 +++ .../adhoc/zabbix_setup/vars/template_master.yml | 27 +++ .../adhoc/zabbix_setup/vars/template_node.yml | 27 +++ .../adhoc/zabbix_setup/vars/template_os_linux.yml | 248 +++++++++++++++++++++ .../adhoc/zabbix_setup/vars/template_router.yml | 27 +++ 14 files changed, 687 insertions(+), 10 
deletions(-) create mode 100644 filter_plugins/oo_zabbix_filters.py create mode 100644 playbooks/adhoc/zabbix_setup/clean_zabbix.yml create mode 100644 playbooks/adhoc/zabbix_setup/create_app.yml create mode 100644 playbooks/adhoc/zabbix_setup/create_application.yml create mode 100644 playbooks/adhoc/zabbix_setup/create_template.yml create mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins create mode 100644 playbooks/adhoc/zabbix_setup/setup_zabbix.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_host.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_master.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_node.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_router.yml (limited to 'playbooks') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 88e86f67c..47033a88e 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -231,15 +231,6 @@ class FilterModule(object): # Gather up the values for the list of keys passed in return [x for x in data if x[filter_attr]] - @staticmethod - def oo_build_zabbix_list_dict(values, string): - ''' Build a list of dicts with string as key for each value - ''' - rval = [] - for value in values: - rval.append({string: value}) - return rval - @staticmethod def oo_parse_heat_stack_outputs(data): ''' Formats the HEAT stack output into a usable form @@ -320,6 +311,5 @@ class FilterModule(object): "oo_combine_key_value": self.oo_combine_key_value, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, - "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs } diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py new file mode 100644 index 000000000..a473993a2 --- /dev/null +++ b/filter_plugins/oo_zabbix_filters.py @@ -0,0 +1,79 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +''' +Custom zabbix filters for use in openshift-ansible +''' + +import pdb + +class FilterModule(object): + ''' Custom zabbix ansible filters ''' + + @staticmethod + def create_data(data, results, key, new_key): + '''Take a dict, filter through results and add results['key'] to dict + ''' + new_list = [app[key] for app in results] + data[new_key] = new_list + return data + + @staticmethod + def oo_set_zbx_trigger_triggerid(item, trigger_results): + '''Set zabbix trigger id from trigger results + ''' + if isinstance(trigger_results, list): + item['triggerid'] = trigger_results[0]['triggerid'] + return item + + item['triggerid'] = trigger_results['triggerids'][0] + return item + + @staticmethod + def oo_set_zbx_item_hostid(item, template_results): + ''' Set zabbix host id from template results + ''' + if isinstance(template_results, list): + item['hostid'] = template_results[0]['templateid'] + return item + + item['hostid'] = template_results['templateids'][0] + return item + + @staticmethod + def oo_pdb(arg): + ''' This pops you into a pdb instance where arg is the data passed in + from the filter. 
+ Ex: "{{ hostvars | oo_pdb }}" + ''' + pdb.set_trace() + return arg + + @staticmethod + def select_by_name(ans_data, data): + ''' test + ''' + for zabbix_item in data: + if ans_data['name'] == zabbix_item: + data[zabbix_item]['params']['hostid'] = ans_data['templateid'] + return data[zabbix_item]['params'] + return None + + @staticmethod + def oo_build_zabbix_list_dict(values, string): + ''' Build a list of dicts with string as key for each value + ''' + rval = [] + for value in values: + rval.append({string: value}) + return rval + + def filters(self): + ''' returns a mapping of filters to methods ''' + return { + "select_by_name": self.select_by_name, + "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid, + "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid, + "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, + "create_data": self.create_data, + } diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml new file mode 100644 index 000000000..bd71e6d1d --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -0,0 +1,66 @@ +--- +- hosts: localhost + gather_facts: no + vars: + g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php + g_zuser: Admin + g_zpassword: zabbix + roles: + - ../roles/os_zabbix + post_tasks: + + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: list + params: + output: extend + search: + host: 'Template Heartbeat' + register: templ_heartbeat + + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: list + params: + output: extend + search: + host: 'Template App Zabbix Server' + register: templ_zabbix_server + + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: list + params: + output: extend + search: + host: 'Template App Zabbix Agent' + register: templ_zabbix_agent + + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: list + register: templates + + - debug: var=templ_heartbeat.results + + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: absent + params: "{{templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('templateid') }}" + register: template_results + when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_app.yml b/playbooks/adhoc/zabbix_setup/create_app.yml new file mode 100644 index 000000000..3a08b2301 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/create_app.yml @@ -0,0 +1,34 @@ +--- +- hosts: localhost + gather_facts: no + vars_files: + - vars/template_heartbeat.yml + - vars/template_os_linux.yml + vars: + g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php + g_zuser: Admin + g_zpassword: zabbix + roles: + - ../roles/os_zabbix + post_tasks: + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: list + params: + output: extend + register: templates + + - debug: var=templates + + - name: Create app + include: create_application.yml + vars: + ctp_template: "{{ g_template_heartbeat }}" + ctp_zserver: "{{ g_zserver }}" + ctp_zuser: "{{ 
g_zuser }}" + ctp_zpassword: "{{ g_zpassword }}" + + diff --git a/playbooks/adhoc/zabbix_setup/create_application.yml b/playbooks/adhoc/zabbix_setup/create_application.yml new file mode 100644 index 000000000..aa6c40ed8 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/create_application.yml @@ -0,0 +1,18 @@ +--- +- debug: var=ctp_template + +- name: Create Application + zbxapi: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + zbx_class: Application + state: present + params: + name: "{{ ctp_template.application['name'] }}" + hostid: 10085 + search: + name: "{{ ctp_template.application['name'] }}" + register: ctp_created_application + +- debug: var=ctp_created_application diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml new file mode 100644 index 000000000..07724d5b7 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/create_template.yml @@ -0,0 +1,59 @@ +--- +- debug: var=ctp_template + +- name: Create Template + zbxapi: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + zbx_class: Template + state: present + params: "{{ ctp_template.params }}" + register: ctp_created_templates + +- debug: var=ctp_created_templates + +#- name: Create Application +# zbxapi: +# server: "{{ ctp_zserver }}" +# user: "{{ ctp_zuser }}" +# password: "{{ ctp_zpassword }}" +# zbx_class: Application +# state: present +# params: +# name: "{{ ctp_template.application.name}}" +# hostid: "{{ ctp_created_templates.results[0].templateid }}" +# search: +# name: "{{ ctp_template.application.name}}" +# register: ctp_created_application + +- debug: var=ctp_created_application + +- name: Create Items + zbxapi: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + zbx_class: Item + state: present + params: "{{ item | oo_set_zbx_item_hostid(ctp_created_templates.results) }}" + with_items: ctp_template.zitems + register: ctp_created_items + +- debug: var=ctp_created_items + +- name: Create Triggers + zbxapi: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + zbx_class: Trigger + state: present + params: "{{ item }}" + with_items: ctp_template.ztriggers + register: ctp_created_triggers + when: ctp_template.ztriggers is defined + +- debug: var=ctp_created_triggers + + diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml new file mode 100644 index 000000000..286f699e5 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml @@ -0,0 +1,41 @@ +--- +- hosts: localhost + gather_facts: no + vars_files: + - vars/template_heartbeat.yml + - vars/template_os_linux.yml + vars: + g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php + g_zuser: Admin + g_zpassword: zabbix + roles: + - ../roles/os_zabbix + post_tasks: + - zbxapi: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + zbx_class: Template + state: list + params: + output: extend + register: templates + + - debug: var=templates + + - name: Include Template + include: create_template.yml + vars: + ctp_template: "{{ g_template_heartbeat }}" + 
ctp_zserver: "{{ g_zserver }}" + ctp_zuser: "{{ g_zuser }}" + ctp_zpassword: "{{ g_zpassword }}" + + - name: Include Template + include: create_template.yml + vars: + ctp_template: "{{ g_template_os_linux }}" + ctp_zserver: "{{ g_zserver }}" + ctp_zuser: "{{ g_zuser }}" + ctp_zpassword: "{{ g_zpassword }}" + diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml new file mode 100644 index 000000000..9d6145ec4 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml @@ -0,0 +1,33 @@ +--- +g_template_heartbeat: + application: + name: Heartbeat +#output: extend + search: + name: Heartbeat + params: + name: Template Heartbeat + host: Template Heartbeat + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Heartbeat + zitems: + - name: Heartbeat Ping + hostid: + key_: heartbeat.ping + type: 2 + value_type: 1 + output: extend + search: + key_: heartbeat.ping + selectApplications: extend + ztriggers: + - description: 'Heartbeat.ping has failed on {HOST.NAME}' + expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Heartbeat.ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml new file mode 100644 index 000000000..e7cc667cb --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_host.yml @@ -0,0 +1,27 @@ +--- +g_template_host: + params: + name: Template Host + host: Template Host + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Host + zitems: + - name: Host Ping + hostid: + key_: host.ping + type: 2 + value_type: 0 + output: extend + search: + key_: host.ping + ztriggers: + - description: 'Host ping has failed on {HOST.NAME}' + expression: '{Template Host:host.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Host ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml new file mode 100644 index 000000000..5f9b41a4f --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_master.yml @@ -0,0 +1,27 @@ +--- +g_template_master: + params: + name: Template Master + host: Template Master + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Master + zitems: + - name: Master Etcd Ping + hostid: + key_: master.etcd.ping + type: 2 + value_type: 0 + output: extend + search: + key_: master.etcd.ping + ztriggers: + - description: 'Master Etcd ping has failed on {HOST.NAME}' + expression: '{Template Master:master.etcd.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Master Etcd ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml new file mode 100644 index 000000000..98c343a24 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_node.yml @@ -0,0 +1,27 @@ +--- +g_template_node: + params: + name: Template Node + host: Template Node + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Node + zitems: + - name: Kubelet Ping + hostid: + key_: kubelet.ping + type: 2 + value_type: 0 + output: extend + search: + key_: kubelet.ping + ztriggers: + - description: 'Kubelet ping has failed 
on {HOST.NAME}' + expression: '{Template Node:kubelet.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Kubelet ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml new file mode 100644 index 000000000..b89711632 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml @@ -0,0 +1,248 @@ +--- +g_template_os_linux: + application: + name: OS Linux + output: extend + search: + name: OS Linux + params: + name: Template OS Linux + host: Template OS Linux + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template OS Linux + zitems: + - hostid: null + key_: kernel.uname.sysname + name: kernel.uname.sysname + search: + key_: kernel.uname.sysname + type: 2 + value_type: 4 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.wait.total + name: kernel.all.cpu.wait.total + search: + key_: kernel.all.cpu.wait.total + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.irq.hard + name: kernel.all.cpu.irq.hard + search: + key_: kernel.all.cpu.irq.hard + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.idle + name: kernel.all.cpu.idle + search: + key_: kernel.all.cpu.idle + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.uname.distro + name: kernel.uname.distro + search: + key_: kernel.uname.distro + type: 2 + value_type: 4 + selectApplications: extend + - hostid: null + key_: kernel.uname.nodename + name: kernel.uname.nodename + search: + key_: kernel.uname.nodename + type: 2 + value_type: 4 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.irq.soft + name: kernel.all.cpu.irq.soft + search: + key_: kernel.all.cpu.irq.soft + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.load.15_minute + name: kernel.all.load.15_minute + search: + key_: kernel.all.load.15_minute + type: 2 + value_type: 0 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.sys + name: kernel.all.cpu.sys + search: + key_: kernel.all.cpu.sys + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.load.5_minute + name: kernel.all.load.5_minute + search: + key_: kernel.all.load.5_minute + type: 2 + value_type: 0 + selectApplications: extend + - hostid: null + key_: mem.freemem + name: mem.freemem + search: + key_: mem.freemem + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.nice + name: kernel.all.cpu.nice + search: + key_: kernel.all.cpu.nice + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: mem.util.bufmem + name: mem.util.bufmem + search: + key_: mem.util.bufmem + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: swap.used + name: swap.used + search: + key_: swap.used + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.load.1_minute + name: kernel.all.load.1_minute + search: + key_: kernel.all.load.1_minute + type: 2 + value_type: 0 + selectApplications: extend + - hostid: null + key_: kernel.uname.version + name: kernel.uname.version + search: + key_: kernel.uname.version + type: 2 + value_type: 4 + selectApplications: extend + - hostid: null + key_: swap.length + name: swap.length + search: + key_: swap.length + type: 2 + value_type: 3 + 
selectApplications: extend + - hostid: null + key_: mem.physmem + name: mem.physmem + search: + key_: mem.physmem + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.uptime + name: kernel.all.uptime + search: + key_: kernel.all.uptime + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: swap.free + name: swap.free + search: + key_: swap.free + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: mem.util.used + name: mem.util.used + search: + key_: mem.util.used + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.user + name: kernel.all.cpu.user + search: + key_: kernel.all.cpu.user + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.uname.machine + name: kernel.uname.machine + search: + key_: kernel.uname.machine + type: 2 + value_type: 4 + selectApplications: extend + - hostid: null + key_: hinv.ncpu + name: hinv.ncpu + search: + key_: hinv.ncpu + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: mem.util.cached + name: mem.util.cached + search: + key_: mem.util.cached + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.cpu.steal + name: kernel.all.cpu.steal + search: + key_: kernel.all.cpu.steal + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.all.pswitch + name: kernel.all.pswitch + search: + key_: kernel.all.pswitch + type: 2 + value_type: 3 + selectApplications: extend + - hostid: null + key_: kernel.uname.release + name: kernel.uname.release + search: + key_: kernel.uname.release + type: 2 + value_type: 4 + selectApplications: extend + - hostid: null + key_: proc.nprocs + name: proc.nprocs + search: + key_: proc.nprocs + type: 2 + value_type: 3 + selectApplications: extend diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml new file mode 100644 index 000000000..4dae7da1e --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_router.yml @@ -0,0 +1,27 @@ +--- +g_template_router: + params: + name: Template Router + host: Template Router + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Router + zitems: + - name: Router Backends down + hostid: + key_: router.backends.down + type: 2 + value_type: 0 + output: extend + search: + key_: router.backends.down + ztriggers: + - description: 'Number of router backends down on {HOST.NAME}' + expression: '{Template Router:router.backends.down.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Number of router backends down on {HOST.NAME}' + expandExpression: True -- cgit v1.2.3 From 52c0fa400b74cec8ab138c6a46078b83e574ed05 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Fri, 24 Jul 2015 11:15:37 -0400 Subject: Removed debug statements --- playbooks/adhoc/zabbix_setup/create_template.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml index 07724d5b7..b055e78eb 100644 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ b/playbooks/adhoc/zabbix_setup/create_template.yml @@ -27,7 +27,7 @@ # name: "{{ ctp_template.application.name}}" # register: ctp_created_application -- debug: var=ctp_created_application +#- debug: var=ctp_created_application - name: Create Items zbxapi: @@ -40,7 
+40,7 @@ with_items: ctp_template.zitems register: ctp_created_items -- debug: var=ctp_created_items +#- debug: var=ctp_created_items - name: Create Triggers zbxapi: @@ -54,6 +54,6 @@ register: ctp_created_triggers when: ctp_template.ztriggers is defined -- debug: var=ctp_created_triggers +#- debug: var=ctp_created_triggers -- cgit v1.2.3 From 1f7c1c62c039b76c9a95532622785a4b4be67ce8 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Mon, 27 Jul 2015 09:30:05 -0400 Subject: Ansible deploy for our monitoring containers --- .../adhoc/deploy_monitoring_containers/deploy.yml | 58 ++++++++++++++++++++++ .../oso-f22-host-monitoring.service | 36 ++++++++++++++ .../oso-rhel7-zagg-client.service | 39 +++++++++++++++ 3 files changed, 133 insertions(+) create mode 100644 playbooks/adhoc/deploy_monitoring_containers/deploy.yml create mode 100644 playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service create mode 100644 playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service (limited to 'playbooks') diff --git a/playbooks/adhoc/deploy_monitoring_containers/deploy.yml b/playbooks/adhoc/deploy_monitoring_containers/deploy.yml new file mode 100644 index 000000000..44df693d5 --- /dev/null +++ b/playbooks/adhoc/deploy_monitoring_containers/deploy.yml @@ -0,0 +1,58 @@ +--- +- name: Setup hosts + hosts: localhost + gather_facts: no + user: root + tasks: + - name: build inven + add_host: "name={{ hostvars[item]['ec2_public_dns_name'] }} groups=oo_hosts" + with_items: groups['tag_env-host-type_kwoodsontest2-openshift-node'] + + - debug: msg=oo_hosts + +- name: Deploy host-monitoring + hosts: oo_hosts + user: root + tasks: + - name: Deploy docker oso-f22-host-monitoring + command: docker pull docker-registry.ops.rhcloud.com/ops/oso-f22-host-monitoring + + - name: Deploy oso-rhel7-zagg-client + command: docker pull docker-registry.ops.rhcloud.com/ops/oso-rhel7-zagg-client + + - name: Copy oso-f22-host-monitoring systemd file + copy: + src: oso-f22-host-monitoring.service + dest: /etc/systemd/system/oso-f22-host-monitoring.service + owner: root + group: root + mode: 0644 + register: pcp_systemd + + - name: Copy zagg-client systemd file + copy: + src: oso-rhel7-zagg-client.service + dest: /etc/systemd/system/oso-rhel7-zagg-client.service + owner: root + group: root + mode: 0644 + register: zagg_systemd + + - name: reload systemd + command: /usr/bin/systemctl --system daemon-reload + when: pcp_systemd.changed or zagg_systemd.changed + + - name: pasue for a few seconds + pause: seconds=5 + + - name: Start the oso-f22-host-monitoring service + service: + name: oso-f22-host-monitoring + state: started + enabled: yes + + - name: Start the oso-rhel7-zagg-client service + service: + name: oso-rhel7-zagg-client + state: started + enabled: yes diff --git a/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service b/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service new file mode 100644 index 000000000..852be09b6 --- /dev/null +++ b/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service @@ -0,0 +1,36 @@ +# This is a systemd file to run this docker container under systemd. 
+# To make this work: +# * pull the image (probably from ops docker registry) +# * place this file in /etc/systemd/system without the .systemd extension +# * run the commands: +# systemctl daemon-reload +# systemctl enable pcp-docker +# systemctl start pcp-docker +# +# +[Unit] +Description=PCP Collector Contatainer +Requires=docker.service +After=docker.service + + +[Service] +Type=simple +TimeoutStartSec=5m +#Slice=container-small.slice + +ExecStartPre=-/usr/bin/docker rm "oso-f22-host-monitoring" + +ExecStart=/usr/bin/docker run --rm --name=oso-f22-host-monitoring \ + --privileged --net=host --pid=host --ipc=host \ + -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \ + -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \ + -v /var/log:/var/log \ + docker-registry.ops.rhcloud.com/ops/oso-f22-host-monitoring + +ExecReload=-/usr/bin/docker stop "oso-f22-host-monitoring" +ExecReload=-/usr/bin/docker rm "oso-f22-host-monitoring" +ExecStop=-/usr/bin/docker stop "oso-f22-host-monitoring" + +[Install] +WantedBy=default.target diff --git a/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service b/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service new file mode 100644 index 000000000..381c7b487 --- /dev/null +++ b/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service @@ -0,0 +1,39 @@ +# This is a systemd file to run this docker container under systemd. +# To make this work: +# * pull the image (probably from ops docker registry) +# * place this file in /etc/systemd/system without the .systemd extension +# * run the commands: +# systemctl daemon-reload +# systemctl enable zagg-client-docker +# systemctl start zagg-client-docker +# +# +[Unit] +Description=Zagg Client Contatainer +Requires=docker.service +After=docker.service + + +[Service] +Type=simple +TimeoutStartSec=5m +#Slice=container-small.slice + +ExecStartPre=-/usr/bin/docker rm "oso-rhel7-zagg-client" + + +ExecStart=/usr/bin/docker run --name oso-rhel7-zagg-client \ + -e ZAGG_SERVER=SERVERNAME \ + -e ZAGG_USER=USERNAME \ + -e ZAGG_PASSWORD=PASSWORD \ + -v /etc/localtime:/etc/localtime \ + -v /run/pcp:/run/pcp \ + docker-registry.ops.rhcloud.com/ops/oso-rhel7-zagg-client + + +ExecReload=-/usr/bin/docker stop "oso-rhel7-zagg-client" +ExecReload=-/usr/bin/docker rm "oso-rhel7-zagg-client" +ExecStop=-/usr/bin/docker stop "oso-rhel7-zagg-client" + +[Install] +WantedBy=default.target -- cgit v1.2.3 From e26c1af8943b8b8e73b550d82cd34c9b68ca913a Mon Sep 17 00:00:00 2001 From: Patrick Tescher Date: Mon, 27 Jul 2015 12:22:00 -0700 Subject: Use AWS m4 instances types AWS m4 replaces m3 and is a bit cheaper. 
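For reference, the per-host-type sizes changed in this commit are plain Ansible variables; a minimal sketch of keeping m4.large for most roles while giving masters a larger size (the variable names are the ones touched in the hunks that follow; the m4.xlarge value is only illustrative, not part of the commit):

ec2_master_instance_type: m4.xlarge   # illustrative override; the commit's default is m4.large
ec2_infra_instance_type: m4.large
ec2_node_instance_type: m4.large
ec2_etcd_instance_type: m4.large
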
--- README_AWS.md | 4 ++-- playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 2 +- playbooks/aws/openshift-cluster/vars.online.int.yml | 8 ++++---- playbooks/aws/openshift-cluster/vars.online.prod.yml | 8 ++++---- playbooks/aws/openshift-cluster/vars.online.stage.yml | 8 ++++---- playbooks/aws/openshift-cluster/vars.yml | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) (limited to 'playbooks') diff --git a/README_AWS.md b/README_AWS.md index 0e3128a92..1c76916cb 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -40,7 +40,7 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne By default, a cluster is launched with the following configuration: -- Instance type: m3.large +- Instance type: m4.large - AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments) - Region: us-east-1 - Keypair name: libra @@ -62,7 +62,7 @@ Node specific defaults: If needed, these values can be changed by setting environment variables on your system. -- export ec2_instance_type='m3.large' +- export ec2_instance_type='m4.large' - export ec2_image='ami-307b3658' - export ec2_region='us-east-1' - export ec2_keypair='libra' diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py index 29e594a65..2b1db62d8 100644 --- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py +++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py @@ -158,7 +158,7 @@ EXAMPLES = ''' # Launch an EC2 instance - ec2: image: "{{ ami_search.results[0].ami_id }}" - instance_type: m3.medium + instance_type: m4.medium key_name: mykey wait: yes ''' diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml index fc8b8d2d2..b9ee29b83 100644 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -3,13 +3,13 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m3.large +ec2_master_instance_type: m4.large ec2_master_security_groups: [ 'integration', 'integration-master' ] -ec2_infra_instance_type: m3.large +ec2_infra_instance_type: m4.large ec2_infra_security_groups: [ 'integration', 'integration-infra' ] -ec2_node_instance_type: m3.large +ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'integration', 'integration-node' ] -ec2_etcd_instance_type: m3.large +ec2_etcd_instance_type: m4.large ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ] ec2_vpc_subnet: subnet-987c0def ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml index f68d41fc4..691582834 100644 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -3,13 +3,13 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m3.large +ec2_master_instance_type: m4.large ec2_master_security_groups: [ 'production', 'production-master' ] -ec2_infra_instance_type: m3.large +ec2_infra_instance_type: m4.large ec2_infra_security_groups: [ 'production', 'production-infra' ] -ec2_node_instance_type: m3.large +ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'production', 'production-node' ] -ec2_etcd_instance_type: m3.large 
+ec2_etcd_instance_type: m4.large ec2_etcd_security_groups: [ 'production', 'production-etcd' ] ec2_vpc_subnet: subnet-987c0def ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml index ce9869fcd..2ec43ad4c 100644 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -3,13 +3,13 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m3.large +ec2_master_instance_type: m4.large ec2_master_security_groups: [ 'stage', 'stage-master' ] -ec2_infra_instance_type: m3.large +ec2_infra_instance_type: m4.large ec2_infra_security_groups: [ 'stage', 'stage-infra' ] -ec2_node_instance_type: m3.large +ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'stage', 'stage-node' ] -ec2_etcd_instance_type: m3.large +ec2_etcd_instance_type: m4.large ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ] ec2_vpc_subnet: subnet-987c0def ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index 07e453f89..fb1793a51 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -8,7 +8,7 @@ deployment_vars: ssh_user: fedora sudo: yes keypair: libra - type: m3.large + type: m4.large security_groups: [ 'public' ] vpc_subnet: assign_public_ip: @@ -20,7 +20,7 @@ deployment_vars: ssh_user: root sudo: no keypair: libra - type: m3.large + type: m4.large security_groups: [ 'public' ] vpc_subnet: assign_public_ip: @@ -32,7 +32,7 @@ deployment_vars: ssh_user: ec2-user sudo: yes keypair: libra - type: m3.large + type: m4.large security_groups: [ 'public' ] vpc_subnet: assign_public_ip: -- cgit v1.2.3 From 8bdaac7f60a5826b8e8518a6f8ded737bd713f82 Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Mon, 27 Jul 2015 16:26:17 -0400 Subject: Setup openshift-node on the masters so that the web console can access the pods --- playbooks/aws/openshift-cluster/config.yml | 1 + playbooks/common/openshift-cluster/config.yml | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 6ee539c7e..8106d5da9 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -15,6 +15,7 @@ g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 0779cfe47..4c74f96db 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -39,6 +39,15 @@ ansible_sudo: "{{ g_sudo | default(omit) }}" with_items: groups[g_nodes_group] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + when: g_nodeonmaster is defined and g_nodeonmaster == true + - name: Evaluate oo_first_etcd add_host: name: "{{ 
groups[g_etcd_group][0] }}" -- cgit v1.2.3 From 276225820663090e8b88a9c2c6974e9f66c8632f Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Wed, 29 Jul 2015 09:41:25 -0400 Subject: added roles symlink for zabbix_setup, fixed URL to work with ZAIO instead of a test cluster instance. --- playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 4 +++- playbooks/adhoc/zabbix_setup/roles | 1 + playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 120000 playbooks/adhoc/zabbix_setup/roles (limited to 'playbooks') diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index bd71e6d1d..610d18b28 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -2,7 +2,9 @@ - hosts: localhost gather_facts: no vars: - g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php + # Use this for local ZAIO + g_zserver: http://localhost/zabbix/api_jsonrpc.php + g_zuser: Admin g_zpassword: zabbix roles: diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml index 286f699e5..8b44f2adf 100644 --- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml @@ -5,7 +5,9 @@ - vars/template_heartbeat.yml - vars/template_os_linux.yml vars: - g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php + # Use this for local ZAIO + g_zserver: http://localhost/zabbix/api_jsonrpc.php + g_zuser: Admin g_zpassword: zabbix roles: -- cgit v1.2.3 From 7aeadcf61aef6256962a4859f4753b69c8fccc0f Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Wed, 29 Jul 2015 16:46:00 -0400 Subject: Set loglevel=2 as our default across the board --- playbooks/aws/openshift-cluster/config.yml | 2 +- playbooks/byo/openshift-cluster/config.yml | 2 +- playbooks/gce/openshift-cluster/config.yml | 2 +- playbooks/libvirt/openshift-cluster/config.yml | 2 +- playbooks/openstack/openshift-cluster/config.yml | 2 +- roles/openshift_common/README.md | 2 +- roles/openshift_common/defaults/main.yml | 2 +- roles/openshift_common/tasks/main.yml | 2 +- roles/openshift_master/README.md | 2 +- roles/openshift_node/README.md | 4 ++-- roles/openshift_registry/README.md | 3 +-- roles/openshift_router/README.md | 3 +-- 12 files changed, 13 insertions(+), 15 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 8106d5da9..a8e3e27bb 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -17,7 +17,7 @@ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 + openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 2ee1d50a7..9e50a4a18 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -5,5 +5,5 
@@ g_masters_group: "{{ 'masters' }}" g_nodes_group: "{{ 'nodes' }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: 4 + openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 219ebe6a0..fd5dfcc72 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -19,6 +19,6 @@ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 + openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 98fe11251..c208eee81 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -20,5 +20,5 @@ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 + openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index 3c9a231e3..a5ee2d6a5 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -15,6 +15,6 @@ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 + openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ansible_default_ipv4.address }}" diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index eb4ef26e8..4d64b7e63 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -15,7 +15,7 @@ Role Variables | Name | Default value | | |---------------------------|-------------------|---------------------------------------------| | openshift_cluster_id | default | Cluster name if multiple OpenShift clusters | -| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_debug_level | 2 | Global openshift debug log verbosity | | openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) | | openshift_ip | UNDEF | Internal IP address to use for this host | | openshift_public_hostname | UNDEF | Public hostname to use for this host | diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index 4d3e0fe9e..267c03605 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -1,3 +1,3 @@ --- openshift_cluster_id: 'default' -openshift_debug_level: 0 +openshift_debug_level: 2 diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index a7c565067..09cc4aaf7 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -4,7 +4,7 @@ role: common local_facts: cluster_id: "{{ openshift_cluster_id | default('default') }}" - debug_level: "{{ openshift_debug_level | default(0) }}" + debug_level: "{{ openshift_debug_level | default(2) }}" hostname: "{{ openshift_hostname | default(None) }}" ip: "{{ openshift_ip | default(None) 
}}" public_hostname: "{{ openshift_public_hostname | default(None) }}" diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 19f77d145..0e7ef3aab 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -28,7 +28,7 @@ From this role: From openshift_common: | Name | Default Value | | |-------------------------------|----------------|----------------------------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_debug_level | 2 | Global openshift debug log verbosity | | openshift_public_ip | UNDEF | Public IP address to use for this host | | openshift_hostname | UNDEF | hostname to use for this instance | diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 5edb3b8dd..0ba1eca93 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -20,9 +20,9 @@ From this role: | oreg_url | UNDEF (Optional) | Default docker registry to use | From openshift_common: -| Name | Default Value | | +| Name | Default Value | | |-------------------------------|---------------------|---------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_debug_level | 2 | Global openshift debug log verbosity | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | diff --git a/roles/openshift_registry/README.md b/roles/openshift_registry/README.md index ec3b4a10b..8e66c483b 100644 --- a/roles/openshift_registry/README.md +++ b/roles/openshift_registry/README.md @@ -21,7 +21,7 @@ From openshift_common: | Name | Default value | | |-----------------------|---------------|--------------------------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_debug_level | 2 | Global openshift debug log verbosity | Dependencies @@ -41,4 +41,3 @@ Author Information ------------------ Red Hat openshift@redhat.com - diff --git a/roles/openshift_router/README.md b/roles/openshift_router/README.md index 6d8ee25c6..836efc443 100644 --- a/roles/openshift_router/README.md +++ b/roles/openshift_router/README.md @@ -19,7 +19,7 @@ From this role: From openshift_common: | Name | Default value | | |-----------------------|---------------|--------------------------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_debug_level | 2 | Global openshift debug log verbosity | Dependencies ------------ @@ -38,4 +38,3 @@ Author Information ------------------ Red Hat openshift@redhat.com - -- cgit v1.2.3 From 5d7753a8ecb03634f045b057dc33369178615f92 Mon Sep 17 00:00:00 2001 From: Pep Turró Mauri Date: Sat, 1 Aug 2015 17:22:24 +0200 Subject: Deploying enterprise with Vagrant --- README_vagrant.md | 28 ++++++++++++++++++++++++++-- Vagrantfile | 39 +++++++++++++++++++++++++++++++++------ playbooks/byo/vagrant.yml | 14 ++++++++++++++ 3 files changed, 73 insertions(+), 8 deletions(-) create mode 100644 playbooks/byo/vagrant.yml (limited to 'playbooks') diff --git a/README_vagrant.md b/README_vagrant.md index 26ec52c0a..5f87d6633 100644 --- a/README_vagrant.md +++ b/README_vagrant.md @@ -2,9 +2,28 @@ Requirements ------------ - vagrant (tested against version 1.7.2) - vagrant-hostmanager plugin (tested against version 1.5.0) +- vagrant-registration plugin (only required for enterprise deployment type) - vagrant-libvirt (tested against version 0.0.26) - Only required if 
using libvirt instead of virtualbox +For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant: + +1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads) + +2. Install it into vagrant + + ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box`` + +3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt) + + Resize the actual qcow2 image: + + ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB`` + + Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. A corrected metadata.json looks like this: + + ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}`` + Usage ----- ``` @@ -21,5 +40,10 @@ vagrant provision Environment Variables --------------------- The following environment variables can be overriden: -- OPENSHIFT_DEPLOYMENT_TYPE (defaults to origin, choices: origin, enterprise, online) -- OPENSHIFT_NUM_NODES (the number of nodes to create, defaults to 2) +- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online) +- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2) + +For ``enterprise`` deployment types these env variables should also be specified: +- ``rhel_subscription_user``: rhsm user +- ``rhel_subscription_pass``: rhsm password +- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects diff --git a/Vagrantfile b/Vagrantfile index f0aa0387b..20cf0b5bd 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -15,6 +15,28 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.hostmanager.manage_host = true config.hostmanager.include_offline = true config.ssh.insert_key = false + + if deployment_type === 'enterprise' + unless Vagrant.has_plugin?('vagrant-registration') + raise 'vagrant-registration-plugin is required for enterprise deployment' + end + username = ENV['rhel_subscription_user'] + password = ENV['rhel_subscription_pass'] + unless username and password + raise 'rhel_subscription_user and rhel_subscription_pass are required' + end + config.registration.username = username + config.registration.password = password + # FIXME this is temporary until vagrant/ansible registration modules + # are capable of handling specific subscription pools + if not ENV['rhel_subscription_pool'].nil? 
+ config.vm.provision "shell" do |s| + s.inline = "subscription-manager attach --pool=$1 || true" + s.args = "#{ENV['rhel_subscription_pool']}" + end + end + end + config.vm.provider "virtualbox" do |vbox, override| override.vm.box = "chef/centos-7.1" vbox.memory = 1024 @@ -28,10 +50,15 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| libvirt.cpus = 2 libvirt.memory = 1024 libvirt.driver = 'kvm' - override.vm.box = "centos-7.1" - override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box" - override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" - override.vm.box_download_checksum_type = "sha256" + case deployment_type + when "enterprise" + override.vm.box = "rhel-7" + when "origin" + override.vm.box = "centos-7.1" + override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box" + override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" + override.vm.box_download_checksum_type = "sha256" + end end num_nodes.times do |n| @@ -53,12 +80,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ansible.sudo = true ansible.groups = { "masters" => ["master"], - "nodes" => ["node1", "node2"], + "nodes" => ["master", "node1", "node2"], } ansible.extra_vars = { openshift_deployment_type: deployment_type, } - ansible.playbook = "playbooks/byo/config.yml" + ansible.playbook = "playbooks/byo/vagrant.yml" end end end diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml new file mode 100644 index 000000000..c89f8775b --- /dev/null +++ b/playbooks/byo/vagrant.yml @@ -0,0 +1,14 @@ +--- +- hosts: all + vars: + deployment_type: "{{ openshift_deployment_type }}" + roles: + - role: rhel_subscribe + when: openshift_deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + - openshift_repos + - os_update_latest + +- include: config.yml -- cgit v1.2.3 From 3548472edd08d09fafcb236790a44bcf31aa5f03 Mon Sep 17 00:00:00 2001 From: Pep Turró Mauri Date: Mon, 3 Aug 2015 16:29:25 +0200 Subject: Move rhel_subscribe tasks to its own playbook Allows reuse out of vagrant, e.g. 
to subscribe systems by its own --- playbooks/byo/rhel_subscribe.yml | 12 ++++++++++++ playbooks/byo/vagrant.yml | 12 +----------- 2 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 playbooks/byo/rhel_subscribe.yml (limited to 'playbooks') diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml new file mode 100644 index 000000000..60300c3dc --- /dev/null +++ b/playbooks/byo/rhel_subscribe.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + vars: + deployment_type: "{{ openshift_deployment_type }}" + roles: + - role: rhel_subscribe + when: openshift_deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + - openshift_repos + - os_update_latest diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml index c89f8775b..76246e7b0 100644 --- a/playbooks/byo/vagrant.yml +++ b/playbooks/byo/vagrant.yml @@ -1,14 +1,4 @@ --- -- hosts: all - vars: - deployment_type: "{{ openshift_deployment_type }}" - roles: - - role: rhel_subscribe - when: openshift_deployment_type == "enterprise" and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - - openshift_repos - - os_update_latest +- include: rhel_subscribe.yml - include: config.yml -- cgit v1.2.3 From e438f0c19e86241e11853970aa7e94e90c5fffeb Mon Sep 17 00:00:00 2001 From: Pep Turró Mauri Date: Mon, 3 Aug 2015 16:40:06 +0200 Subject: Use deployment_type, not openshift_deployment_type This seems to be what's used in other places --- Vagrantfile | 2 +- playbooks/byo/rhel_subscribe.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/Vagrantfile b/Vagrantfile index 20cf0b5bd..4675b5d60 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -83,7 +83,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| "nodes" => ["master", "node1", "node2"], } ansible.extra_vars = { - openshift_deployment_type: deployment_type, + deployment_type: deployment_type, } ansible.playbook = "playbooks/byo/vagrant.yml" end diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 60300c3dc..f564905ea 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,10 +1,10 @@ --- - hosts: all vars: - deployment_type: "{{ openshift_deployment_type }}" + openshift_deployment_type: "{{ deployment_type }}" roles: - role: rhel_subscribe - when: openshift_deployment_type == "enterprise" and + when: deployment_type == "enterprise" and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] -- cgit v1.2.3 From b497f7a2a77c3ef1434a5f0bd11fccf9d81b44aa Mon Sep 17 00:00:00 2001 From: Diego Castro Date: Tue, 4 Aug 2015 11:10:35 -0300 Subject: Fix node labeling. 
Issue #305 --- filter_plugins/oo_filters.py | 11 +++++++++++ playbooks/common/openshift-node/config.yml | 5 ++++- roles/openshift_manage_node/tasks/main.yml | 7 +++++++ 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'playbooks') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 47033a88e..9c263f0dd 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -130,6 +130,16 @@ class FilterModule(object): rval.append("%s%s%s" % (item['key'], joiner, item['value'])) return rval + + @staticmethod + def oo_combine_dict(data, in_joiner='=', out_joiner=' '): + '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and + arrange them as a string 'key=value key=value' + ''' + if not issubclass(type(data), dict): + raise errors.AnsibleFilterError("|failed expects first param is a dict") + + return out_joiner.join([ in_joiner.join([k, v]) for k, v in data.items() ]) @staticmethod def oo_ami_selector(data, image_name): @@ -309,6 +319,7 @@ class FilterModule(object): "oo_ami_selector": self.oo_ami_selector, "oo_ec2_volume_definition": self.oo_ec2_volume_definition, "oo_combine_key_value": self.oo_combine_key_value, + "oo_combine_dict": self.oo_combine_dict, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 6ef375bbb..122cfbf92 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -126,9 +126,12 @@ - name: Set scheduleability hosts: oo_first_master vars: + openshift_node_labels: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('openshift.node.labels') }}" openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift.common.hostname') }}" + | oo_collect('openshift.common.hostname') }}" openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([])) | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" pre_tasks: diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index d17f3f532..e64d6e713 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -16,3 +16,10 @@ command: > {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true with_items: openshift_scheduleable_nodes + +- name: Tag schedulable nodes + command: > + {{ openshift.common.client_binary }} label --overwrite node {{ item.0 }} {{ item.1 | oo_combine_dict }} + with_nested: + - openshift_scheduleable_nodes + - openshift_node_labels \ No newline at end of file -- cgit v1.2.3 From 424c9a5f7ae96a7f20e1baae25614c228591b94f Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Wed, 5 Aug 2015 10:34:44 -0400 Subject: Increase disk size for AWS --- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 92155582e..236d84e74 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -99,7 +99,7 @@ iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}" node: root: - volume_size: "{{ 
lookup('env', 'os_node_root_vol_size') | default(25, true) }}" + volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}" device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}" iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}" docker: -- cgit v1.2.3 From 0497eac6ad52db6aefc947d2ecb5843c42b236da Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Wed, 5 Aug 2015 09:53:37 -0500 Subject: namespace the byo inventory so the group names aren't so generic --- README_OSE.md | 10 +++++----- README_origin.md | 10 +++++----- Vagrantfile | 4 ++-- inventory/byo/hosts.example | 8 ++++---- playbooks/byo/openshift-cluster/config.yml | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) (limited to 'playbooks') diff --git a/README_OSE.md b/README_OSE.md index 31173e5d6..116717033 100644 --- a/README_OSE.md +++ b/README_OSE.md @@ -65,8 +65,8 @@ option to ansible-playbook. # Create an OSEv3 group that contains the masters and nodes groups [OSEv3:children] -masters -nodes +openshift_masters +openshift_nodes # Set variables common for all OSEv3 hosts [OSEv3:vars] @@ -96,11 +96,11 @@ openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] # host group for masters -[masters] +[openshift_masters] ose3-master.example.com # host group for nodes -[nodes] +[openshift_nodes] ose3-node[1:2].example.com ``` @@ -234,7 +234,7 @@ what we expect them to be (if not, we can override them). To override the the defaults, you can set the variables in your inventory: ``` ...snip... -[masters] +[openshift_masters] ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com ...snip... ``` diff --git a/README_origin.md b/README_origin.md index f13fe660a..a30ad2c71 100644 --- a/README_origin.md +++ b/README_origin.md @@ -54,8 +54,8 @@ option to ansible-playbook. # Create an OSEv3 group that contains the masters and nodes groups [OSv3:children] -masters -nodes +openshift_masters +openshift_nodes # Set variables common for all OSEv3 hosts [OSv3:vars] @@ -68,11 +68,11 @@ ansible_ssh_user=root deployment_type=origin # host group for masters -[masters] +[openshift_masters] osv3-master.example.com # host group for nodes -[nodes] +[openshift_nodes] osv3-node[1:2].example.com ``` @@ -207,7 +207,7 @@ what we expect them to be (if not, we can override them). To override the the defaults, you can set the variables in your inventory: ``` ...snip... -[masters] +[openshift_masters] osv3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=osv3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=osv3-master.public.example.com ...snip... 
``` diff --git a/Vagrantfile b/Vagrantfile index a832ae84e..8e6796927 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -52,8 +52,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ansible.limit = 'all' ansible.sudo = true ansible.groups = { - "masters" => ["master"], - "nodes" => ["node1", "node2"], + "openshift_masters" => ["master"], + "openshift_nodes" => ["node1", "node2"], } ansible.extra_vars = { openshift_deployment_type: "origin", diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 4c652d06e..c8a4272f0 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -2,8 +2,8 @@ # Create an OSEv3 group that contains the masters and nodes groups [OSEv3:children] -masters -nodes +openshift_masters +openshift_nodes etcd # Set variables common for all OSEv3 hosts @@ -58,13 +58,13 @@ deployment_type=enterprise #osm_default_subdomain=apps.test.example.com # host group for masters -[masters] +[openshift_masters] ose3-master[1:3]-ansible.test.example.com [etcd] ose3-etcd[1:3]-ansible.test.example.com # host group for nodes -[nodes] +[openshift_nodes] ose3-master[1:3]-ansible.test.example.com openshift_scheduleable=False ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 2ee1d50a7..67d394e5d 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -2,8 +2,8 @@ - include: ../../common/openshift-cluster/config.yml vars: g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" + g_masters_group: "{{ 'openshift_masters' }}" + g_nodes_group: "{{ 'openshift_nodes' }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" -- cgit v1.2.3 From ef0986b5d45d7aba81ecd187c49688708d785a87 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Mon, 3 Aug 2015 13:04:36 -0400 Subject: Added a pv creation script --- playbooks/adhoc/create_pv/create_pv.yaml | 134 +++++++++++++++++++++++++++++++ playbooks/adhoc/create_pv/pv-template.j2 | 16 ++++ 2 files changed, 150 insertions(+) create mode 100644 playbooks/adhoc/create_pv/create_pv.yaml create mode 100644 playbooks/adhoc/create_pv/pv-template.j2 (limited to 'playbooks') diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml new file mode 100644 index 000000000..c74734fb7 --- /dev/null +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -0,0 +1,134 @@ +--- +- name: Create a volume and attach it to master + hosts: localhost + gather_facts: no + vars: + cli_volume_type: gp2 + cli_volume_iops: '' + oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] | + intersect(groups['tag_environment_' ~ cli_environment]) | + first }}" + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." 
+ when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_volume_size + - cli_device_name + - cli_hosttype + - cli_environment + + - name: set oo_name fact + set_fact: + oo_name: "{{ oo_name }}" + + + - name: Select a single master to run this on + add_host: + hostname: "{{ oo_name }}" + ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}" + groups: oo_master + + - name: Create a volume and attach it + ec2_vol: + state: present + instance: "{{ hostvars[oo_name]['ec2_id'] }}" + region: "{{ hostvars[oo_name]['ec2_region'] }}" + volume_size: "{{ cli_volume_size }}" + volume_type: "{{ cli_volume_type }}" + device_name: "{{ cli_device_name }}" + iops: "{{ cli_volume_iops }}" + register: vol + + - debug: var=vol + +- name: Configure the drive + gather_facts: no + hosts: oo_master + user: root + connection: ssh + vars: + pv_tmpdir: /tmp/persistentvolumes + + post_tasks: + - name: Setting facts for template + set_fact: + pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}" + vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}" + vol_id: "{{ hostvars.localhost.vol.volume_id }}" + vol_size: "{{ cli_volume_size }}" + pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}" + + - set_fact: + pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml" + + - name: "Mkdir {{ pv_tmpdir }}" + file: + state: directory + path: "{{ pv_tmpdir }}" + mode: '0750' + + - name: "Mkdir {{ pv_mntdir }}" + file: + state: directory + path: "{{ pv_mntdir }}" + mode: '0750' + + - name: Create pv file from template + template: + src: ./pv-template.j2 + dest: "{{ pv_template }}" + owner: root + mode: '0640' + + - name: mkfs + filesystem: + dev: "{{ cli_device_name }}" + fstype: ext4 + + - name: Mount the dev + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + fstype: ext4 + state: mounted + + - name: chgrp g+rwXs + file: + path: "{{ pv_mntdir }}" + mode: 'g+rwXs' + recurse: yes + seuser: system_u + serole: object_r + setype: svirt_sandbox_file_t + selevel: s0 + + - name: umount + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + state: unmounted + fstype: ext4 + + - name: detach drive + delegate_to: localhost + ec2_vol: + region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}" + id: "{{ hostvars.localhost.vol.volume_id }}" + instance: None + + - name: "Remove {{ pv_mntdir }}" + file: + state: absent + path: "{{ pv_mntdir }}" + + # We have to use the shell module because we can't set env vars with the command module. + - name: "Place PV into oc" + shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}" + register: oc_output + + - debug: var=oc_output + + - fail: + msg: "Failed to add {{ pv_template }} to master." 
+ when: oc_output.rc != 0 diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2 new file mode 100644 index 000000000..5654ef6c4 --- /dev/null +++ b/playbooks/adhoc/create_pv/pv-template.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ pv_name }} + labels: + type: ebs +spec: + capacity: + storage: {{ vol_size }}Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + awsElasticBlockStore: + volumeID: aws://{{ vol_az }}/{{ vol_id }} + fsType: ext4 -- cgit v1.2.3 From 517557bd7e7bf22c5ccfc226df32e86dab70940a Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 6 Aug 2015 12:26:49 -0400 Subject: Revert "namespace the byo inventory so the group names aren't so generic" --- README_OSE.md | 10 +++++----- README_origin.md | 10 +++++----- Vagrantfile | 4 ++-- inventory/byo/hosts.example | 8 ++++---- playbooks/byo/openshift-cluster/config.yml | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) (limited to 'playbooks') diff --git a/README_OSE.md b/README_OSE.md index 116717033..31173e5d6 100644 --- a/README_OSE.md +++ b/README_OSE.md @@ -65,8 +65,8 @@ option to ansible-playbook. # Create an OSEv3 group that contains the masters and nodes groups [OSEv3:children] -openshift_masters -openshift_nodes +masters +nodes # Set variables common for all OSEv3 hosts [OSEv3:vars] @@ -96,11 +96,11 @@ openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] # host group for masters -[openshift_masters] +[masters] ose3-master.example.com # host group for nodes -[openshift_nodes] +[nodes] ose3-node[1:2].example.com ``` @@ -234,7 +234,7 @@ what we expect them to be (if not, we can override them). To override the the defaults, you can set the variables in your inventory: ``` ...snip... -[openshift_masters] +[masters] ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com ...snip... ``` diff --git a/README_origin.md b/README_origin.md index a30ad2c71..f13fe660a 100644 --- a/README_origin.md +++ b/README_origin.md @@ -54,8 +54,8 @@ option to ansible-playbook. # Create an OSEv3 group that contains the masters and nodes groups [OSv3:children] -openshift_masters -openshift_nodes +masters +nodes # Set variables common for all OSEv3 hosts [OSv3:vars] @@ -68,11 +68,11 @@ ansible_ssh_user=root deployment_type=origin # host group for masters -[openshift_masters] +[masters] osv3-master.example.com # host group for nodes -[openshift_nodes] +[nodes] osv3-node[1:2].example.com ``` @@ -207,7 +207,7 @@ what we expect them to be (if not, we can override them). To override the the defaults, you can set the variables in your inventory: ``` ...snip... -[openshift_masters] +[masters] osv3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=osv3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=osv3-master.public.example.com ...snip... 
``` diff --git a/Vagrantfile b/Vagrantfile index 8e6796927..a832ae84e 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -52,8 +52,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ansible.limit = 'all' ansible.sudo = true ansible.groups = { - "openshift_masters" => ["master"], - "openshift_nodes" => ["node1", "node2"], + "masters" => ["master"], + "nodes" => ["node1", "node2"], } ansible.extra_vars = { openshift_deployment_type: "origin", diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index c8a4272f0..4c652d06e 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -2,8 +2,8 @@ # Create an OSEv3 group that contains the masters and nodes groups [OSEv3:children] -openshift_masters -openshift_nodes +masters +nodes etcd # Set variables common for all OSEv3 hosts @@ -58,13 +58,13 @@ deployment_type=enterprise #osm_default_subdomain=apps.test.example.com # host group for masters -[openshift_masters] +[masters] ose3-master[1:3]-ansible.test.example.com [etcd] ose3-etcd[1:3]-ansible.test.example.com # host group for nodes -[openshift_nodes] +[nodes] ose3-master[1:3]-ansible.test.example.com openshift_scheduleable=False ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 67d394e5d..2ee1d50a7 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -2,8 +2,8 @@ - include: ../../common/openshift-cluster/config.yml vars: g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'openshift_masters' }}" - g_nodes_group: "{{ 'openshift_nodes' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" -- cgit v1.2.3 From e0f8681e66256e6bdf636b7b44de6781d348d182 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Thu, 6 Aug 2015 12:57:13 -0400 Subject: example added --- playbooks/adhoc/create_pv/create_pv.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index c74734fb7..684a0ca72 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -1,4 +1,12 @@ --- +#example run: +# ansible-playbook -e "cli_volume_size=1" \ +# -e "cli_device_name=/dev/xvdf" \ +# -e "cli_hosttype=master" \ +# -e "cli_environment=ops" \ +# create_pv.yaml +# FIXME: we need to change "environment" to "clusterid" as that's what it really is now. 
+# - name: Create a volume and attach it to master hosts: localhost gather_facts: no -- cgit v1.2.3 From c17efa0172a11f79cb28d3c5740b7c16ed70c3b8 Mon Sep 17 00:00:00 2001 From: Diego Castro Date: Sun, 9 Aug 2015 12:40:28 -0300 Subject: Fix node labels --- playbooks/common/openshift-node/config.yml | 3 --- roles/openshift_manage_node/tasks/main.yml | 7 +++---- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 122cfbf92..4010b4c9e 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -126,9 +126,6 @@ - name: Set scheduleability hosts: oo_first_master vars: - openshift_node_labels: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift.node.labels') }}" openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) | oo_collect('openshift.common.hostname') }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 257bcee2c..c488af723 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -19,7 +19,6 @@ - name: Label nodes command: > - {{ openshift.common.client_binary }} label --overwrite node {{ item.0 }} {{ item.1 | oo_combine_dict }} - with_nested: - - openshift_nodes - - openshift_node_labels + {{ openshift.common.client_binary }} label --overwrite node {{ item }} {{ hostvars[item]['openshift_node_labels'] | oo_combine_dict }} + with_items: + - "{{ openshift_nodes }}" -- cgit v1.2.3 From 619a5ee2064f0aca1c3d199542db461a0fae9eb0 Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Tue, 11 Aug 2015 17:56:02 +0200 Subject: Fix infra node support on libvirt --- playbooks/libvirt/openshift-cluster/launch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 6630fa27d..830f9d216 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -42,7 +42,7 @@ count: "{{ num_infra }}" - include: tasks/launch_instances.yml vars: - instances: "{{ infra_names }}" + instances: "{{ node_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" -- cgit v1.2.3 From 1dff2ee13fbaabad521ead7bf83cd3c1a4d8de55 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Tue, 11 Aug 2015 15:49:05 -0400 Subject: removed deploy monitoring containders --- .../adhoc/deploy_monitoring_containers/deploy.yml | 58 ---------------------- .../oso-f22-host-monitoring.service | 36 -------------- .../oso-rhel7-zagg-client.service | 39 --------------- 3 files changed, 133 deletions(-) delete mode 100644 playbooks/adhoc/deploy_monitoring_containers/deploy.yml delete mode 100644 playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service delete mode 100644 playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service (limited to 'playbooks') diff --git a/playbooks/adhoc/deploy_monitoring_containers/deploy.yml b/playbooks/adhoc/deploy_monitoring_containers/deploy.yml deleted file mode 100644 index 44df693d5..000000000 --- a/playbooks/adhoc/deploy_monitoring_containers/deploy.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: Setup hosts - hosts: localhost - gather_facts: no - user: root - tasks: - - name: build inven - add_host: "name={{ 
hostvars[item]['ec2_public_dns_name'] }} groups=oo_hosts" - with_items: groups['tag_env-host-type_kwoodsontest2-openshift-node'] - - - debug: msg=oo_hosts - -- name: Deploy host-monitoring - hosts: oo_hosts - user: root - tasks: - - name: Deploy docker oso-f22-host-monitoring - command: docker pull docker-registry.ops.rhcloud.com/ops/oso-f22-host-monitoring - - - name: Deploy oso-rhel7-zagg-client - command: docker pull docker-registry.ops.rhcloud.com/ops/oso-rhel7-zagg-client - - - name: Copy oso-f22-host-monitoring systemd file - copy: - src: oso-f22-host-monitoring.service - dest: /etc/systemd/system/oso-f22-host-monitoring.service - owner: root - group: root - mode: 0644 - register: pcp_systemd - - - name: Copy zagg-client systemd file - copy: - src: oso-rhel7-zagg-client.service - dest: /etc/systemd/system/oso-rhel7-zagg-client.service - owner: root - group: root - mode: 0644 - register: zagg_systemd - - - name: reload systemd - command: /usr/bin/systemctl --system daemon-reload - when: pcp_systemd.changed or zagg_systemd.changed - - - name: pasue for a few seconds - pause: seconds=5 - - - name: Start the oso-f22-host-monitoring service - service: - name: oso-f22-host-monitoring - state: started - enabled: yes - - - name: Start the oso-rhel7-zagg-client service - service: - name: oso-rhel7-zagg-client - state: started - enabled: yes diff --git a/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service b/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service deleted file mode 100644 index 852be09b6..000000000 --- a/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service +++ /dev/null @@ -1,36 +0,0 @@ -# This is a systemd file to run this docker container under systemd. -# To make this work: -# * pull the image (probably from ops docker registry) -# * place this file in /etc/systemd/system without the .systemd extension -# * run the commands: -# systemctl daemon-reload -# systemctl enable pcp-docker -# systemctl start pcp-docker -# -# -[Unit] -Description=PCP Collector Contatainer -Requires=docker.service -After=docker.service - - -[Service] -Type=simple -TimeoutStartSec=5m -#Slice=container-small.slice - -ExecStartPre=-/usr/bin/docker rm "oso-f22-host-monitoring" - -ExecStart=/usr/bin/docker run --rm --name=oso-f22-host-monitoring \ - --privileged --net=host --pid=host --ipc=host \ - -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \ - -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \ - -v /var/log:/var/log \ - docker-registry.ops.rhcloud.com/ops/oso-f22-host-monitoring - -ExecReload=-/usr/bin/docker stop "oso-f22-host-monitoring" -ExecReload=-/usr/bin/docker rm "oso-f22-host-monitoring" -ExecStop=-/usr/bin/docker stop "oso-f22-host-monitoring" - -[Install] -WantedBy=default.target diff --git a/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service b/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service deleted file mode 100644 index 381c7b487..000000000 --- a/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service +++ /dev/null @@ -1,39 +0,0 @@ -# This is a systemd file to run this docker container under systemd. 
-# To make this work: -# * pull the image (probably from ops docker registry) -# * place this file in /etc/systemd/system without the .systemd extension -# * run the commands: -# systemctl daemon-reload -# systemctl enable zagg-client-docker -# systemctl start zagg-client-docker -# -# -[Unit] -Description=Zagg Client Contatainer -Requires=docker.service -After=docker.service - - -[Service] -Type=simple -TimeoutStartSec=5m -#Slice=container-small.slice - -ExecStartPre=-/usr/bin/docker rm "oso-rhel7-zagg-client" - - -ExecStart=/usr/bin/docker run --name oso-rhel7-zagg-client \ - -e ZAGG_SERVER=SERVERNAME \ - -e ZAGG_USER=USERNAME \ - -e ZAGG_PASSWORD=PASSWORD \ - -v /etc/localtime:/etc/localtime \ - -v /run/pcp:/run/pcp \ - docker-registry.ops.rhcloud.com/ops/oso-rhel7-zagg-client - - -ExecReload=-/usr/bin/docker stop "oso-rhel7-zagg-client" -ExecReload=-/usr/bin/docker rm "oso-rhel7-zagg-client" -ExecStop=-/usr/bin/docker stop "oso-rhel7-zagg-client" - -[Install] -WantedBy=default.target -- cgit v1.2.3 From a073f179b26c0d110aa6a8b7fc560ca061e4dc5c Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Tue, 11 Aug 2015 15:52:38 -0400 Subject: Zabbix Idempotency --- git/.pylintrc | 6 +- playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 37 +- playbooks/adhoc/zabbix_setup/create_template.yml | 30 +- playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 9 +- .../adhoc/zabbix_setup/vars/template_heartbeat.yml | 28 +- .../adhoc/zabbix_setup/vars/template_os_linux.yml | 304 +++++----------- roles/os_zabbix/library/__init__.py | 0 roles/os_zabbix/library/test.yml | 92 +++++ roles/os_zabbix/library/zbx_host.py | 162 +++++++++ roles/os_zabbix/library/zbx_hostgroup.py | 116 +++++++ roles/os_zabbix/library/zbx_item.py | 160 +++++++++ roles/os_zabbix/library/zbx_mediatype.py | 149 ++++++++ roles/os_zabbix/library/zbx_template.py | 126 +++++++ roles/os_zabbix/library/zbx_trigger.py | 175 ++++++++++ roles/os_zabbix/library/zbx_user.py | 146 ++++++++ roles/os_zabbix/library/zbx_usergroup.py | 160 +++++++++ roles/os_zabbix/library/zbxapi.py | 382 --------------------- 17 files changed, 1406 insertions(+), 676 deletions(-) create mode 100644 roles/os_zabbix/library/__init__.py create mode 100644 roles/os_zabbix/library/test.yml create mode 100644 roles/os_zabbix/library/zbx_host.py create mode 100644 roles/os_zabbix/library/zbx_hostgroup.py create mode 100644 roles/os_zabbix/library/zbx_item.py create mode 100644 roles/os_zabbix/library/zbx_mediatype.py create mode 100644 roles/os_zabbix/library/zbx_template.py create mode 100644 roles/os_zabbix/library/zbx_trigger.py create mode 100644 roles/os_zabbix/library/zbx_user.py create mode 100644 roles/os_zabbix/library/zbx_usergroup.py delete mode 100755 roles/os_zabbix/library/zbxapi.py (limited to 'playbooks') diff --git a/git/.pylintrc b/git/.pylintrc index af8f1656f..fe6eef6de 100644 --- a/git/.pylintrc +++ b/git/.pylintrc @@ -71,7 +71,7 @@ confidence= # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" # w0511 - fixme - disabled because TODOs are acceptable -disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511 
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801 [REPORTS] @@ -205,7 +205,7 @@ docstring-min-length=-1 [SIMILARITIES] # Minimum lines number of a similarity. -min-similarity-lines=4 +min-similarity-lines=0 # Ignore comments when computing similarities. ignore-comments=yes @@ -214,7 +214,7 @@ ignore-comments=yes ignore-docstrings=yes # Ignore imports when computing similarities. -ignore-imports=no +ignore-imports=yes [VARIABLES] diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index 610d18b28..a31cbef65 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -2,67 +2,50 @@ - hosts: localhost gather_facts: no vars: - # Use this for local ZAIO g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin g_zpassword: zabbix roles: - - ../roles/os_zabbix + - ../../../roles/os_zabbix post_tasks: - - zbxapi: + - zbx_template: server: "{{ g_zserver }}" user: "{{ g_zuser }}" password: "{{ g_zpassword }}" - zbx_class: Template state: list - params: - output: extend - search: - host: 'Template Heartbeat' + name: 'Template Heartbeat' register: templ_heartbeat - - zbxapi: + - zbx_template: server: "{{ g_zserver }}" user: "{{ g_zuser }}" password: "{{ g_zpassword }}" - zbx_class: Template state: list - params: - output: extend - search: - host: 'Template App Zabbix Server' + name: 'Template App Zabbix Server' register: templ_zabbix_server - - zbxapi: + - zbx_template: server: "{{ g_zserver }}" user: "{{ g_zuser }}" password: "{{ g_zpassword }}" - zbx_class: Template state: list - params: - output: extend - search: - host: 'Template App Zabbix Agent' + name: 'Template App Zabbix Agent' register: templ_zabbix_agent - - zbxapi: + - zbx_template: server: "{{ g_zserver }}" user: "{{ g_zuser }}" password: "{{ g_zpassword }}" - zbx_class: Template state: list register: templates - debug: var=templ_heartbeat.results - - zbxapi: + - zbx_template: server: "{{ g_zserver }}" user: "{{ g_zuser }}" password: "{{ g_zpassword }}" - zbx_class: Template state: absent - params: "{{templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('templateid') }}" - register: template_results + with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml index b055e78eb..60fb27666 100644 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ b/playbooks/adhoc/zabbix_setup/create_template.yml @@ -2,16 +2,14 @@ - debug: var=ctp_template - name: Create Template - zbxapi: + zbx_template: server: "{{ ctp_zserver }}" user: "{{ ctp_zuser }}" password: "{{ ctp_zpassword }}" - zbx_class: Template - state: present - params: "{{ ctp_template.params }}" - register: ctp_created_templates + name: "{{ ctp_template.name }}" + register: ctp_created_template -- debug: var=ctp_created_templates +- debug: var=ctp_created_template #- name: Create Application # zbxapi: @@ -22,7 +20,7 @@ # state: present # params: # name: "{{ ctp_template.application.name}}" -# hostid: "{{ 
ctp_created_templates.results[0].templateid }}" +# hostid: "{{ ctp_created_template.results[0].templateid }}" # search: # name: "{{ ctp_template.application.name}}" # register: ctp_created_application @@ -30,28 +28,28 @@ #- debug: var=ctp_created_application - name: Create Items - zbxapi: + zbx_item: server: "{{ ctp_zserver }}" user: "{{ ctp_zuser }}" password: "{{ ctp_zpassword }}" - zbx_class: Item - state: present - params: "{{ item | oo_set_zbx_item_hostid(ctp_created_templates.results) }}" + name: "{{ item.name }}" + key: "{{ item.key }}" + value_type: "{{ item.value_type | default('int') }}" + template_name: "{{ ctp_template.name }}" with_items: ctp_template.zitems register: ctp_created_items #- debug: var=ctp_created_items - name: Create Triggers - zbxapi: + zbx_trigger: server: "{{ ctp_zserver }}" user: "{{ ctp_zuser }}" password: "{{ ctp_zpassword }}" - zbx_class: Trigger - state: present - params: "{{ item }}" + description: "{{ item.description }}" + expression: "{{ item.expression }}" + priority: "{{ item.priority }}" with_items: ctp_template.ztriggers - register: ctp_created_triggers when: ctp_template.ztriggers is defined #- debug: var=ctp_created_triggers diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml index 8b44f2adf..1729194b5 100644 --- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml @@ -5,22 +5,17 @@ - vars/template_heartbeat.yml - vars/template_os_linux.yml vars: - # Use this for local ZAIO g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin g_zpassword: zabbix roles: - - ../roles/os_zabbix + - ../../../roles/os_zabbix post_tasks: - - zbxapi: + - zbx_template: server: "{{ g_zserver }}" user: "{{ g_zuser }}" password: "{{ g_zpassword }}" - zbx_class: Template state: list - params: - output: extend register: templates - debug: var=templates diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml index 9d6145ec4..22cc75554 100644 --- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml +++ b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml @@ -1,33 +1,11 @@ --- g_template_heartbeat: - application: - name: Heartbeat -#output: extend - search: - name: Heartbeat - params: - name: Template Heartbeat - host: Template Heartbeat - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Heartbeat + name: Template Heartbeat zitems: - name: Heartbeat Ping hostid: - key_: heartbeat.ping - type: 2 - value_type: 1 - output: extend - search: - key_: heartbeat.ping - selectApplications: extend + key: heartbeat.ping ztriggers: - description: 'Heartbeat.ping has failed on {HOST.NAME}' expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Heartbeat.ping has failed on*' - expandExpression: True + priority: avg diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml index b89711632..6fab08879 100644 --- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml +++ b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml @@ -1,248 +1,120 @@ --- g_template_os_linux: - application: - name: OS Linux - output: extend - search: - name: OS Linux - params: - name: Template OS Linux - host: Template OS Linux - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template OS Linux + name: 
Template OS Linux zitems: - - hostid: null - key_: kernel.uname.sysname + - key: kernel.uname.sysname name: kernel.uname.sysname - search: - key_: kernel.uname.sysname - type: 2 - value_type: 4 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.wait.total + value_type: string + + - key: kernel.all.cpu.wait.total name: kernel.all.cpu.wait.total - search: - key_: kernel.all.cpu.wait.total - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.irq.hard + value_type: int + + - key: kernel.all.cpu.irq.hard name: kernel.all.cpu.irq.hard - search: - key_: kernel.all.cpu.irq.hard - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.idle + value_type: int + + - key: kernel.all.cpu.idle name: kernel.all.cpu.idle - search: - key_: kernel.all.cpu.idle - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.uname.distro + value_type: int + + - key: kernel.uname.distro name: kernel.uname.distro - search: - key_: kernel.uname.distro - type: 2 - value_type: 4 - selectApplications: extend - - hostid: null - key_: kernel.uname.nodename + value_type: string + + - key: kernel.uname.nodename name: kernel.uname.nodename - search: - key_: kernel.uname.nodename - type: 2 - value_type: 4 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.irq.soft + value_type: string + + - key: kernel.all.cpu.irq.soft name: kernel.all.cpu.irq.soft - search: - key_: kernel.all.cpu.irq.soft - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.load.15_minute + value_type: int + + - key: kernel.all.load.15_minute name: kernel.all.load.15_minute - search: - key_: kernel.all.load.15_minute - type: 2 - value_type: 0 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.sys + value_type: float + + - key: kernel.all.cpu.sys name: kernel.all.cpu.sys - search: - key_: kernel.all.cpu.sys - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.load.5_minute + value_type: int + + - key: kernel.all.load.5_minute name: kernel.all.load.5_minute - search: - key_: kernel.all.load.5_minute - type: 2 - value_type: 0 - selectApplications: extend - - hostid: null - key_: mem.freemem + value_type: float + + - key: mem.freemem name: mem.freemem - search: - key_: mem.freemem - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.nice + value_type: int + + - key: kernel.all.cpu.nice name: kernel.all.cpu.nice - search: - key_: kernel.all.cpu.nice - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: mem.util.bufmem + value_type: int + + - key: mem.util.bufmem name: mem.util.bufmem - search: - key_: mem.util.bufmem - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: swap.used + value_type: int + + - key: swap.used name: swap.used - search: - key_: swap.used - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.load.1_minute + value_type: int + + - key: kernel.all.load.1_minute name: kernel.all.load.1_minute - search: - key_: kernel.all.load.1_minute - type: 2 - value_type: 0 - selectApplications: extend - - hostid: null - key_: kernel.uname.version + value_type: float + + - key: kernel.uname.version name: kernel.uname.version - search: - key_: kernel.uname.version - type: 2 - value_type: 4 - selectApplications: extend - - hostid: null - key_: swap.length + value_type: string + + - key: swap.length 
name: swap.length - search: - key_: swap.length - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: mem.physmem + value_type: int + + - key: mem.physmem name: mem.physmem - search: - key_: mem.physmem - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.uptime + value_type: int + + - key: kernel.all.uptime name: kernel.all.uptime - search: - key_: kernel.all.uptime - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: swap.free + value_type: int + + - key: swap.free name: swap.free - search: - key_: swap.free - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: mem.util.used + value_type: int + + - key: mem.util.used name: mem.util.used - search: - key_: mem.util.used - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.user + value_type: int + + - key: kernel.all.cpu.user name: kernel.all.cpu.user - search: - key_: kernel.all.cpu.user - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.uname.machine + value_type: int + + - key: kernel.uname.machine name: kernel.uname.machine - search: - key_: kernel.uname.machine - type: 2 - value_type: 4 - selectApplications: extend - - hostid: null - key_: hinv.ncpu + value_type: string + + - key: hinv.ncpu name: hinv.ncpu - search: - key_: hinv.ncpu - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: mem.util.cached + value_type: int + + - key: mem.util.cached name: mem.util.cached - search: - key_: mem.util.cached - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.cpu.steal + value_type: int + + - key: kernel.all.cpu.steal name: kernel.all.cpu.steal - search: - key_: kernel.all.cpu.steal - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.all.pswitch + value_type: int + + + - key: kernel.all.pswitch name: kernel.all.pswitch - search: - key_: kernel.all.pswitch - type: 2 - value_type: 3 - selectApplications: extend - - hostid: null - key_: kernel.uname.release + value_type: int + + - key: kernel.uname.release name: kernel.uname.release - search: - key_: kernel.uname.release - type: 2 - value_type: 4 - selectApplications: extend - - hostid: null - key_: proc.nprocs + value_type: string + + - key: proc.nprocs name: proc.nprocs - search: - key_: proc.nprocs - type: 2 - value_type: 3 - selectApplications: extend + value_type: int diff --git a/roles/os_zabbix/library/__init__.py b/roles/os_zabbix/library/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/roles/os_zabbix/library/test.yml b/roles/os_zabbix/library/test.yml new file mode 100644 index 000000000..f585bcbb2 --- /dev/null +++ b/roles/os_zabbix/library/test.yml @@ -0,0 +1,92 @@ +--- +# This is a test playbook to create one of each of the zabbix ansible modules. 
+# ensure that the zbxapi module is installed +# ansible-playbook test.yml +- name: Test zabbix ansible module + hosts: localhost + gather_facts: no + vars: + zbx_server: http://localhost/zabbix/api_jsonrpc.php + zbx_user: Admin + zbx_password: zabbix + + pre_tasks: + - name: Create a template + zbx_template: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'test template' + register: template_output + + - debug: var=template_output + + - name: Create an item + zbx_item: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'test item' + key: 'kenny.item.1' + template_name: "{{ template_output.results[0].host }}" + register: item_output + + - debug: var=item_output + + - name: Create an trigger + zbx_trigger: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + expression: '{test template:kenny.item.1.last()}>2' + desc: 'Kenny desc' + register: trigger_output + + - debug: var=trigger_output + + - name: Create a hostgroup + zbx_hostgroup: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'kenny hostgroup' + register: hostgroup_output + + - debug: var=hostgroup_output + + - name: Create a host + zbx_host: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'kenny host' + hostgroups: + - 'kenny hostgroup' + register: host_output + + - debug: var=host_output + + - name: Create a usergroup + zbx_usergroup: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: kenny usergroup + rights: + - 'kenny hostgroup': rw + register: usergroup_output + + - debug: var=usergroup_output + + - name: Create a user + zbx_user: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + alias: kenny user + passwd: zabbix + usergroups: + - kenny usergroup + register: user_output + + - debug: var=user_output diff --git a/roles/os_zabbix/library/zbx_host.py b/roles/os_zabbix/library/zbx_host.py new file mode 100644 index 000000000..d75dfdea1 --- /dev/null +++ b/roles/os_zabbix/library/zbx_host.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python +''' +Zabbix host ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_group_ids(zapi, hostgroup_names): + ''' + get hostgroups + ''' + # Fetch groups by name + group_ids = [] + for hgr in hostgroup_names: + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) + if content.has_key('result'): + group_ids.append({'groupid': content['result'][0]['groupid']}) + + return group_ids + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append({'templateid': content['results'][0]['templateid']}) + return template_ids + +def main(): + ''' + Ansible module for zabbix host + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + hostgroup_names=dict(default=[], type='list'), + template_names=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + interfaces=dict(default=[], type='list'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'host' + idname = "hostid" + hname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': hname}, + 'selectGroups': 'groupid', + 'selectParentTemplates': 'templateid', + 'selectInterfaces': 'interfaceid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'host': hname, + 'groups': get_group_ids(zapi, module.params('hostgroup_names')), + 'templates': get_template_ids(zapi, module.params('template_names')), + 'interfaces': module.params.get('interfaces', [{'type': 1, # interface type, 1 = agent + 'main': 1, # default interface? 1 = true + 'useip': 1, # default interface? 1 = true + 'ip': '127.0.0.1', # default interface? 1 = true + 'dns': '', # dns for host + 'port': '10050', # port for interface? 
10050 + }]) + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_hostgroup.py b/roles/os_zabbix/library/zbx_hostgroup.py new file mode 100644 index 000000000..a1eb875d4 --- /dev/null +++ b/roles/os_zabbix/library/zbx_hostgroup.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +''' Ansible module for hostgroup +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix hostgroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' ansible module for hostgroup + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'hostgroup' + idname = "groupid" + hname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': hname}, + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': hname} + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py new file mode 100644 index 000000000..6cfb16d48 --- /dev/null +++ b/roles/os_zabbix/library/zbx_item.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python +''' + Ansible module for zabbix items +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix item ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def main(): + ''' + ansible zabbix module for zbx_item + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + template_name=dict(default=None, type='str'), + zabbix_type=dict(default=2, type='int'), + value_type=dict(default='int', type='str'), + applications=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'item' + idname = "itemid" + state = module.params['state'] + key = module.params['key'] + template_name = module.params['template_name'] + + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + templateid = None + if content['result']: + templateid = content['result'][0]['templateid'] + else: + module.exit_json(changed=False, + results='Error: Could find template with name %s for item.' 
% template_name, + state="Unkown") + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'key_': key}, + 'selectApplications': 'applicationid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': module.params['name'], + 'key_': key, + 'hostid': templateid, + 'type': module.params['zabbix_type'], + 'value_type': get_value_type(module.params['value_type']), + 'applications': module.params['applications'], + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_mediatype.py b/roles/os_zabbix/library/zbx_mediatype.py new file mode 100644 index 000000000..a49aecd0f --- /dev/null +++ b/roles/os_zabbix/library/zbx_mediatype.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +''' + Ansible module for mediatype +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix mediatype ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True +def get_mtype(mtype): + ''' + Transport used by the media type. + Possible values: + 0 - email; + 1 - script; + 2 - SMS; + 3 - Jabber; + 100 - Ez Texting. 
+ ''' + mtype = mtype.lower() + media_type = None + if mtype == 'script': + media_type = 1 + elif mtype == 'sms': + media_type = 2 + elif mtype == 'jabber': + media_type = 3 + elif mtype == 'script': + media_type = 100 + else: + media_type = 0 + + return media_type + +def main(): + ''' + Ansible zabbix module for mediatype + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + description=dict(default=None, type='str'), + mtype=dict(default=None, type='str'), + smtp_server=dict(default=None, type='str'), + smtp_helo=dict(default=None, type='str'), + smtp_email=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'mediatype' + idname = "mediatypeid" + description = module.params['description'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}}) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'description': description, + 'type': get_mtype(module.params['media_type']), + 'smtp_server': module.params['smtp_server'], + 'smtp_helo': module.params['smtp_helo'], + 'smtp_email': module.params['smtp_email'], + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and \ + zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_template.py b/roles/os_zabbix/library/zbx_template.py new file mode 100644 index 000000000..676fa7e49 --- /dev/null +++ b/roles/os_zabbix/library/zbx_template.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +''' +Ansible module for template +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix template ansible module +# +# +# Copyright 2015 Red Hat Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' Ansible module for template + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zbc = ZabbixConnection(module.params['server'], user, passwd, module.params['debug']) + zapi = ZabbixAPI(zbc) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'template' + idname = 'templateid' + tname = module.params['name'] + state = module.params['state'] + # get a template, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': tname}, + 'selectParentTemplates': 'templateid', + 'selectGroups': 'groupid', + #'selectApplications': extend, + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'groups': module.params.get('groups', [{'groupid': '1'}]), + 'host': tname, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=content['result'], state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + 
module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_trigger.py b/roles/os_zabbix/library/zbx_trigger.py new file mode 100644 index 000000000..7cc9356c8 --- /dev/null +++ b/roles/os_zabbix/library/zbx_trigger.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +''' +ansible module for zabbix triggers +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix trigger ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + + +def get_priority(priority): + ''' determine priority + ''' + prior = 0 + if 'info' in priority: + prior = 1 + elif 'warn' in priority: + prior = 2 + elif 'avg' == priority or 'ave' in priority: + prior = 3 + elif 'high' in priority: + prior = 4 + elif 'dis' in priority: + prior = 5 + + return prior + +def get_deps(zapi, deps): + ''' get trigger dependencies + ''' + results = [] + for desc in deps: + content = zapi.get_content('trigger', + 'get', + {'search': {'description': desc}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if content.has_key('result'): + results.append({'triggerid': content['result'][0]['triggerid']}) + + return results + +def main(): + ''' + Create a trigger in zabbix + + Example: + "params": { + "description": "Processor load is too high on {HOST.NAME}", + "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", + "dependencies": [ + { + "triggerid": "14062" + } + ] + }, + + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + expression=dict(default=None, type='str'), + description=dict(default=None, type='str'), + dependencies=dict(default=[], type='list'), + priority=dict(default='avg', type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], 
user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'trigger' + idname = "triggerid" + state = module.params['state'] + description = module.params['description'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'description': description}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'description': description, + 'expression': module.params['expression'], + 'dependencies': get_deps(zapi, module.params['dependencies']), + 'priority': get_priority(module.params['priority']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py new file mode 100644 index 000000000..489023407 --- /dev/null +++ b/roles/os_zabbix/library/zbx_user.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +''' +ansible module for zabbix users +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix user ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
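# A small, hypothetical illustration of how the zbx_trigger module above
# turns its human-friendly arguments into Zabbix API parameters: the
# `priority` string is mapped onto a numeric severity and each dependency
# description is resolved to a triggerid.  `lookup_triggerid` is a made-up
# stand-in for the zapi.get_content('trigger', 'get', ...) lookup.
def sketch_trigger_params(description, expression, priority='avg',
                          deps=None, lookup_triggerid=lambda desc: None):
    severities = {'info': 1, 'warn': 2, 'avg': 3, 'high': 4, 'dis': 5}
    prior = next((num for word, num in severities.items() if word in priority), 0)
    dependencies = []
    for desc in (deps or []):
        triggerid = lookup_triggerid(desc)
        if triggerid:
            dependencies.append({'triggerid': triggerid})
    return {'description': description,
            'expression': expression,
            'priority': prior,
            'dependencies': dependencies}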
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_usergroups(zapi, usergroups): + ''' Get usergroups + ''' + ugroups = [] + for ugr in usergroups: + content = zapi.get_content('usergroup', + 'get', + {'search': {'name': ugr}, + #'selectUsers': 'userid', + #'getRights': 'extend' + }) + if content['result']: + ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) + + return ugroups + +def main(): + ''' + ansible zabbix module for users + ''' + + ##def user(self, name, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + alias=dict(default=None, type='str'), + passwd=dict(default=None, type='str'), + usergroups=dict(default=None, type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + password = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zbc = ZabbixConnection(module.params['server'], user, password, module.params['debug']) + zapi = ZabbixAPI(zbc) + + ## before we can create a user media and users with media types we need media + zbx_class_name = 'user' + idname = "userid" + alias = module.params['alias'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'output': 'extend', + 'search': {'alias': alias}, + "selectUsrgrps": 'usergrpid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'alias': alias, + 'passwd': module.params['passwd'], + 'usrgrps': get_usergroups(zapi, module.params['usergroups']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'passwd': + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_usergroup.py b/roles/os_zabbix/library/zbx_usergroup.py new file mode 100644 index 000000000..ede4c9df1 --- /dev/null +++ b/roles/os_zabbix/library/zbx_usergroup.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python +''' +zabbix ansible module for usergroups +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix usergroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rights(zapi, rights): + '''Get rights + ''' + perms = [] + for right in rights: + hstgrp = right.keys()[0] + perm = right.values()[0] + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) + if content['result']: + permission = 0 + if perm == 'ro': + permission = 2 + elif perm == 'rw': + permission = 3 + perms.append({'id': content['result'][0]['groupid'], + 'permission': permission}) + return perms + +def get_userids(zapi, users): + ''' Get userids from user aliases + ''' + userids = [] + for alias in users: + content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) + if content['result']: + userids.append(content['result'][0]['userid']) + + return userids + +def main(): + ''' Ansible module for usergroup + ''' + + ##def usergroup(self, name, rights=None, users=None, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + rights=dict(default=[], type='list'), + users=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + zbx_class_name = 'usergroup' + idname = "usrgrpid" + uname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': uname}, + 'selectUsers': 'userid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, 
state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': uname, + 'rights': get_rights(zapi, module.params['rights']), + 'userids': get_userids(zapi, module.params['users']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'rights': + differences['rights'] = value + + elif key == 'userids' and zab_results.has_key('users'): + if zab_results['users'] != value: + differences['userids'] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py deleted file mode 100755 index 48f294938..000000000 --- a/roles/os_zabbix/library/zbxapi.py +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env python -# vim: expandtab:tabstop=4:shiftwidth=4 -''' - ZabbixAPI ansible module -''' - -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Purpose: An ansible module to communicate with zabbix. 
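# For context, a rough and purely illustrative sketch of the JSON-RPC 2.0
# payload this (now removed) module posts to api_jsonrpc.php; the real body
# is assembled in ZabbixAPI.perform() further down.  The method name, params
# and auth value below are made-up examples.
import json
example_body = json.dumps({
    "jsonrpc": "2.0",          # protocol version implemented by the Zabbix API
    "method": "template.get",  # <class>.<method> being invoked
    "params": {"search": {"host": "Template OS Linux"}},
    "id": 1,                   # arbitrary request identifier
    "auth": None,              # token from user.login; dropped for login/api.version calls
})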
-# - -# pylint: disable=line-too-long -# Disabling line length for readability - -import json -import httplib2 -import sys -import os -import re -import copy - -class ZabbixAPIError(Exception): - ''' - ZabbixAPIError - Exists to propagate errors up from the api - ''' - pass - -class ZabbixAPI(object): - ''' - ZabbixAPI class - ''' - classes = { - 'Action': ['create', 'delete', 'get', 'update'], - 'Alert': ['get'], - 'Application': ['create', 'delete', 'get', 'massadd', 'update'], - 'Configuration': ['export', 'import'], - 'Dcheck': ['get'], - 'Dhost': ['get'], - 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Dservice': ['get'], - 'Event': ['acknowledge', 'get'], - 'Graph': ['create', 'delete', 'get', 'update'], - 'Graphitem': ['get'], - 'Graphprototype': ['create', 'delete', 'get', 'update'], - 'History': ['get'], - 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'], - 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'], - 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'], - 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Image': ['create', 'delete', 'get', 'update'], - 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Maintenance': ['create', 'delete', 'get', 'update'], - 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Mediatype': ['create', 'delete', 'get', 'update'], - 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Screen': ['create', 'delete', 'get', 'update'], - 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'], - 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'], - 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'], - 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'], - 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], - 'Templatescreenitem': ['get'], - 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'], - 'Triggerprototype': ['create', 'delete', 'get', 'update'], - 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'], - 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'], - 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'], - 'Usermedia': ['get'], - } - - def __init__(self, data=None): - if not data: - data = {} - self.server = data.get('server', None) - self.username = data.get('user', None) - self.password = data.get('password', None) - if any([value == None for value in [self.server, self.username, self.password]]): - print 'Please specify zabbix server url, username, and password.' 
- sys.exit(1) - - self.verbose = data.get('verbose', False) - self.use_ssl = data.has_key('use_ssl') - self.auth = None - - for cname, _ in self.classes.items(): - setattr(self, cname.lower(), getattr(self, cname)(self)) - - # pylint: disable=no-member - # This method does not exist until the metaprogramming executed - results = self.user.login(user=self.username, password=self.password) - - if results[0]['status'] == '200': - if results[1].has_key('result'): - self.auth = results[1]['result'] - elif results[1].has_key('error'): - print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error']) - sys.exit(1) - else: - print "Error in call to zabbix. Http status: {0}.".format(results[0]['status']) - sys.exit(1) - - def perform(self, method, rpc_params): - ''' - This method calls your zabbix server. - - It requires the following parameters in order for a proper request to be processed: - jsonrpc - the version of the JSON-RPC protocol used by the API; - the Zabbix API implements JSON-RPC version 2.0; - method - the API method being called; - rpc_params - parameters that will be passed to the API method; - id - an arbitrary identifier of the request; - auth - a user authentication token; since we don't have one yet, it's set to null. - ''' - http_method = "POST" - jsonrpc = "2.0" - rid = 1 - - http = None - if self.use_ssl: - http = httplib2.Http() - else: - http = httplib2.Http(disable_ssl_certificate_validation=True,) - - headers = {} - headers["Content-type"] = "application/json" - - body = { - "jsonrpc": jsonrpc, - "method": method, - "params": rpc_params.get('params', {}), - "id": rid, - 'auth': self.auth, - } - - if method in ['user.login', 'api.version']: - del body['auth'] - - body = json.dumps(body) - - if self.verbose: - print body - print method - print headers - httplib2.debuglevel = 1 - - response, content = http.request(self.server, http_method, body, headers) - - if response['status'] not in ['200', '201']: - raise ZabbixAPIError('Error calling zabbix. Zabbix returned %s' % response['status']) - - if self.verbose: - print response - print content - - try: - content = json.loads(content) - except ValueError as err: - content = {"error": err.message} - - return response, content - - @staticmethod - def meta(cname, method_names): - ''' - This bit of metaprogramming is where the ZabbixAPI subclasses are created. - For each of ZabbixAPI.classes we create a class from the key and methods - from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class - to each subclass in order for each to be able to call the perform method. - ''' - def meta_method(_class, method_name): - ''' - This meta method allows a class to add methods to it. - ''' - # This template method is a stub method for each of the subclass - # methods. - def template_method(self, params=None, **rpc_params): - ''' - This template method is a stub method for each of the subclass methods. - ''' - if params: - rpc_params['params'] = params - else: - rpc_params['params'] = copy.deepcopy(rpc_params) - - return self.parent.perform(cname.lower()+"."+method_name, rpc_params) - - template_method.__doc__ = \ - "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % \ - (cname.lower(), method_name) - template_method.__name__ = method_name - # this is where the template method is placed inside of the subclass - # e.g. setattr(User, "create", stub_method) - setattr(_class, template_method.__name__, template_method) - - # This class call instantiates a subclass. e.g. 
User - _class = type(cname, - (object,), - {'__doc__': \ - "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % cname.lower()}) - def __init__(self, parent): - ''' - This init method gets placed inside of the _class - to allow it to be instantiated. A reference to the parent class(ZabbixAPI) - is passed in to allow each class access to the perform method. - ''' - self.parent = parent - - # This attaches the init to the subclass. e.g. Create - setattr(_class, __init__.__name__, __init__) - # For each of our ZabbixAPI.classes dict values - # Create a method and attach it to our subclass. - # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile', - # 'update', 'iswritable', 'logout', 'addmedia', 'create', - # 'login', 'deletemedia', 'isreadable'], - # User.delete - # User.get - for method_name in method_names: - meta_method(_class, method_name) - # Return our subclass with all methods attached - return _class - -# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming -for _class_name, _method_names in ZabbixAPI.classes.items(): - setattr(ZabbixAPI, _class_name, ZabbixAPI.meta(_class_name, _method_names)) - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def diff_content(from_zabbix, from_user, ignore=None): - ''' Compare passed in object to results returned from zabbix - ''' - terms = ['search', 'output', 'groups', 'select', 'expand', 'filter'] - if ignore: - terms.extend(ignore) - regex = '(' + '|'.join(terms) + ')' - retval = {} - for key, value in from_user.items(): - if re.findall(regex, key): - continue - - # special case here for templates. You query templates and - # the zabbix api returns parentTemplates. These will obviously fail. - # So when its templates compare against parentTemplates. 
- if key == 'templates' and from_zabbix.has_key('parentTemplates'): - if from_zabbix['parentTemplates'] != value: - retval[key] = value - - elif from_zabbix[key] != str(value): - retval[key] = str(value) - - return retval - -def main(): - ''' - This main method runs the ZabbixAPI Ansible Module - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - zbx_class=dict(choices=ZabbixAPI.classes.keys()), - params=dict(), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ignore=dict(default=None, type='list'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', None) - if not user: - user = os.environ['ZABBIX_USER'] - - passwd = module.params.get('password', None) - if not passwd: - passwd = os.environ['ZABBIX_PASSWORD'] - - - - api_data = { - 'user': user, - 'password': passwd, - 'server': module.params['server'], - 'verbose': module.params['debug'] - } - - if not user or not passwd or not module.params['server']: - module.fail_json(msg='Please specify the user, password, and the zabbix server.') - - zapi = ZabbixAPI(api_data) - - ignore = module.params['ignore'] - zbx_class = module.params.get('zbx_class') - rpc_params = module.params.get('params', {}) - state = module.params.get('state') - - - # Get the instance we are trying to call - zbx_class_inst = zapi.__getattribute__(zbx_class.lower()) - - # perform get - # Get the instance's method we are trying to call - - zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['get'] - _, content = zbx_action_method(zbx_class_inst, rpc_params) - - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - # If we are coming from a query, we need to pass in the correct rpc_params for delete. - # specifically the zabbix class name + 'id' - # if rpc_params is a list then we need to pass it. (list of ids to delete) - idname = zbx_class.lower() + "id" - if not isinstance(rpc_params, list) and content['result'][0].has_key(idname): - rpc_params = [content['result'][0][idname]] - - zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['delete'] - _, content = zbx_action_method(zbx_class_inst, rpc_params) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - # It's not there, create it! - if not exists(content): - zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['create'] - _, content = zbx_action_method(zbx_class_inst, rpc_params) - module.exit_json(changed=True, results=content['result'], state='present') - - # It's there and the same, do nothing! - diff_params = diff_content(content['result'][0], rpc_params, ignore) - if not diff_params: - module.exit_json(changed=False, results=content['result'], state="present") - - # Add the id to update with - idname = zbx_class.lower() + "id" - diff_params[idname] = content['result'][0][idname] - - - ## It's there and not the same, update it! - zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['update'] - _, content = zbx_action_method(zbx_class_inst, diff_params) - module.exit_json(changed=True, results=content, state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. 
%s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() - -- cgit v1.2.3 From 6a177ba46fc232f28e430858bdc0c082b912f026 Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Tue, 11 Aug 2015 16:13:38 +0200 Subject: Infra node support for OpenStack --- .../openshift-cluster/files/heat_stack.yaml | 103 +++++++++++++++++++-- .../openshift-cluster/files/heat_stack_server.yaml | 9 +- playbooks/openstack/openshift-cluster/launch.yml | 4 +- 3 files changed, 105 insertions(+), 11 deletions(-) (limited to 'playbooks') diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index a15ec749c..d53884e0d 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -16,8 +16,13 @@ parameters: num_nodes: type: number - label: Number of nodes - description: Number of nodes + label: Number of compute nodes + description: Number of compute nodes + + num_infra: + type: number + label: Number of infrastructure nodes + description: Number of infrastructure nodes cidr: type: string @@ -55,7 +60,12 @@ parameters: node_image: type: string label: Node image - description: Name of the image for the node servers + description: Name of the image for the compute node servers + + infra_image: + type: string + label: Infra image + description: Name of the image for the infra node servers master_flavor: type: string @@ -65,7 +75,12 @@ parameters: node_flavor: type: string label: Node flavor - description: Flavor of the node servers + description: Flavor of the compute node servers + + infra_flavor: + type: string + label: Infra flavor + description: Flavor of the infra node servers outputs: @@ -83,15 +98,27 @@ outputs: node_names: description: Name of the nodes - value: { get_attr: [ nodes, name ] } + value: { get_attr: [ compute_nodes, name ] } node_ips: description: IPs of the nodes - value: { get_attr: [ nodes, private_ip ] } + value: { get_attr: [ compute_nodes, private_ip ] } node_floating_ips: description: Floating IPs of the nodes - value: { get_attr: [ nodes, floating_ip ] } + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } resources: @@ -218,6 +245,29 @@ resources: remote_mode: remote_group_id remote_group_id: { get_resource: master-secgrp } + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + masters: type: OS::Heat::ResourceGroup properties: @@ -248,7 +298,7 @@ resources: cluster_id: { get_param: cluster_id } depends_on: interface - nodes: + compute_nodes: type: OS::Heat::ResourceGroup properties: count: { get_param: 
num_nodes } @@ -257,12 +307,14 @@ resources: properties: name: str_replace: - template: cluster_id-k8s_type-%index% + template: cluster_id-k8s_type-sub_host_type-%index% params: cluster_id: { get_param: cluster_id } k8s_type: node + sub_host_type: compute cluster_id: { get_param: cluster_id } type: node + subtype: compute image: { get_param: node_image } flavor: { get_param: node_flavor } key_name: { get_resource: keypair } @@ -277,3 +329,36 @@ resources: params: cluster_id: { get_param: cluster_id } depends_on: interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_infra } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: cluster_id-k8s_type-sub_host_type-%index% + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + sub_host_type: infra + cluster_id: { get_param: cluster_id } + type: node + subtype: infra + image: { get_param: infra_image } + flavor: { get_param: infra_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + depends_on: interface diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml index 55f64211a..9dcab3e60 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml @@ -19,6 +19,12 @@ parameters: label: Type description: Type master or node + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + key_name: type: string label: Key name @@ -102,11 +108,12 @@ resources: env: { get_param: cluster_id } host-type: { get_param: type } env-host-type: - str_template: + str_replace: template: cluster_id-openshift-type params: cluster_id: { get_param: cluster_id } type: { get_param: type } + sub-host-type: { get_param: subtype } port: type: OS::Neutron::Port diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index d41448dc0..d36bdbf26 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -90,7 +90,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_node' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute' with_together: - parsed_outputs.node_names - parsed_outputs.node_ips @@ -115,6 +115,7 @@ with_flattened: - parsed_outputs.master_floating_ips - parsed_outputs.node_floating_ips + - parsed_outputs.infra_floating_ips - name: Wait for user setup command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup' @@ -125,6 +126,7 @@ with_flattened: - parsed_outputs.master_floating_ips - 
parsed_outputs.node_floating_ips + - parsed_outputs.infra_floating_ips - include: update.yml -- cgit v1.2.3 From 75170e2f6558dc9df9bfdb93dbf2bf9b13c1bce5 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 12 Aug 2015 12:47:32 -0400 Subject: zbx item now uses key as default name. Clean up. --- playbooks/adhoc/zabbix_setup/create_app.yml | 34 ---------------------- .../adhoc/zabbix_setup/create_application.yml | 18 ------------ playbooks/adhoc/zabbix_setup/create_template.yml | 2 +- .../adhoc/zabbix_setup/vars/template_os_linux.yml | 30 ------------------- roles/os_zabbix/library/zbx_item.py | 2 +- 5 files changed, 2 insertions(+), 84 deletions(-) delete mode 100644 playbooks/adhoc/zabbix_setup/create_app.yml delete mode 100644 playbooks/adhoc/zabbix_setup/create_application.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/zabbix_setup/create_app.yml b/playbooks/adhoc/zabbix_setup/create_app.yml deleted file mode 100644 index 3a08b2301..000000000 --- a/playbooks/adhoc/zabbix_setup/create_app.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix - roles: - - ../roles/os_zabbix - post_tasks: - - zbxapi: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - zbx_class: Template - state: list - params: - output: extend - register: templates - - - debug: var=templates - - - name: Create app - include: create_application.yml - vars: - ctp_template: "{{ g_template_heartbeat }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - - diff --git a/playbooks/adhoc/zabbix_setup/create_application.yml b/playbooks/adhoc/zabbix_setup/create_application.yml deleted file mode 100644 index aa6c40ed8..000000000 --- a/playbooks/adhoc/zabbix_setup/create_application.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- debug: var=ctp_template - -- name: Create Application - zbxapi: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - zbx_class: Application - state: present - params: - name: "{{ ctp_template.application['name'] }}" - hostid: 10085 - search: - name: "{{ ctp_template.application['name'] }}" - register: ctp_created_application - -- debug: var=ctp_created_application diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml index 60fb27666..50fff53b2 100644 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ b/playbooks/adhoc/zabbix_setup/create_template.yml @@ -32,8 +32,8 @@ server: "{{ ctp_zserver }}" user: "{{ ctp_zuser }}" password: "{{ ctp_zpassword }}" - name: "{{ item.name }}" key: "{{ item.key }}" + name: "{{ item.name | default(item.key, true) }}" value_type: "{{ item.value_type | default('int') }}" template_name: "{{ ctp_template.name }}" with_items: ctp_template.zitems diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml index 6fab08879..9cc038ffa 100644 --- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml +++ b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml @@ -3,118 +3,88 @@ g_template_os_linux: name: Template OS Linux zitems: - key: kernel.uname.sysname - name: kernel.uname.sysname value_type: string - key: kernel.all.cpu.wait.total - name: kernel.all.cpu.wait.total 
value_type: int - key: kernel.all.cpu.irq.hard - name: kernel.all.cpu.irq.hard value_type: int - key: kernel.all.cpu.idle - name: kernel.all.cpu.idle value_type: int - key: kernel.uname.distro - name: kernel.uname.distro value_type: string - key: kernel.uname.nodename - name: kernel.uname.nodename value_type: string - key: kernel.all.cpu.irq.soft - name: kernel.all.cpu.irq.soft value_type: int - key: kernel.all.load.15_minute - name: kernel.all.load.15_minute value_type: float - key: kernel.all.cpu.sys - name: kernel.all.cpu.sys value_type: int - key: kernel.all.load.5_minute - name: kernel.all.load.5_minute value_type: float - key: mem.freemem - name: mem.freemem value_type: int - key: kernel.all.cpu.nice - name: kernel.all.cpu.nice value_type: int - key: mem.util.bufmem - name: mem.util.bufmem value_type: int - key: swap.used - name: swap.used value_type: int - key: kernel.all.load.1_minute - name: kernel.all.load.1_minute value_type: float - key: kernel.uname.version - name: kernel.uname.version value_type: string - key: swap.length - name: swap.length value_type: int - key: mem.physmem - name: mem.physmem value_type: int - key: kernel.all.uptime - name: kernel.all.uptime value_type: int - key: swap.free - name: swap.free value_type: int - key: mem.util.used - name: mem.util.used value_type: int - key: kernel.all.cpu.user - name: kernel.all.cpu.user value_type: int - key: kernel.uname.machine - name: kernel.uname.machine value_type: string - key: hinv.ncpu - name: hinv.ncpu value_type: int - key: mem.util.cached - name: mem.util.cached value_type: int - key: kernel.all.cpu.steal - name: kernel.all.cpu.steal value_type: int - - key: kernel.all.pswitch - name: kernel.all.pswitch value_type: int - key: kernel.uname.release - name: kernel.uname.release value_type: string - key: proc.nprocs - name: proc.nprocs value_type: int diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py index 6cfb16d48..57ec06463 100644 --- a/roles/os_zabbix/library/zbx_item.py +++ b/roles/os_zabbix/library/zbx_item.py @@ -119,7 +119,7 @@ def main(): module.exit_json(changed=True, results=content['result'], state="absent") if state == 'present': - params = {'name': module.params['name'], + params = {'name': module.params.get('name', module.params['key']), 'key_': key, 'hostid': templateid, 'type': module.params['zabbix_type'], -- cgit v1.2.3 From a2e27c5925954ce04fca9c891099a6146a418222 Mon Sep 17 00:00:00 2001 From: Diego Castro Date: Thu, 13 Aug 2015 12:19:20 -0300 Subject: Configure cluster metrics Playbook based on https://docs.openshift.org/latest/admin_guide/cluster_metrics.html. 
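For orientation, the openshift_cluster_metrics role added below copies the manifests to /etc/openshift/cluster-metrics/, grants the heapster service account the cluster-reader role, and drives every object through `oc create -f`, treating "already exists" in stderr as success so repeated runs stay idempotent. A rough, hypothetical Python equivalent of that create-unless-present pattern (the binary name and manifest list are illustrative only):

import subprocess

def create_if_missing(client_binary, manifest):
    '''Apply a manifest, treating "already exists" as an acceptable outcome.'''
    proc = subprocess.run([client_binary, 'create', '-f', manifest],
                          capture_output=True, text=True)
    if proc.returncode != 0 and 'already exists' not in proc.stderr:
        raise RuntimeError(proc.stderr)

for name in ('influxdb.yaml', 'heapster-serviceaccount.yaml',
             'heapster.yaml', 'grafana.yaml'):
    create_if_missing('oc', '/etc/openshift/cluster-metrics/' + name)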
--- playbooks/common/openshift-master/config.yml | 2 + .../files/cluster-metrics/grafana.yaml | 53 +++++++++++++++++ .../cluster-metrics/heapster-serviceaccount.yaml | 4 ++ .../files/cluster-metrics/heapster.yaml | 30 ++++++++++ .../files/cluster-metrics/influxdb.yaml | 67 ++++++++++++++++++++++ roles/openshift_cluster_metrics/tasks/main.yml | 50 ++++++++++++++++ roles/openshift_facts/library/openshift_facts.py | 19 ++++++ roles/openshift_node/defaults/main.yml | 4 ++ roles/openshift_node/templates/node.yaml.v1.j2 | 1 + .../templates/partials/kubeletArguments.j2 | 5 ++ 10 files changed, 235 insertions(+) create mode 100644 roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml create mode 100644 roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml create mode 100644 roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml create mode 100644 roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml create mode 100644 roles/openshift_cluster_metrics/tasks/main.yml create mode 100644 roles/openshift_node/templates/partials/kubeletArguments.j2 (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 904ad2dab..acf85fc04 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -215,6 +215,8 @@ - role: openshift_master_cluster when: openshift_master_ha | bool - openshift_examples + - role: openshift_cluster_metrics + when: openshift.common.use_cluster_metrics | bool # Additional instance config for online deployments - name: Additional instance config diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml new file mode 100644 index 000000000..bff422efc --- /dev/null +++ b/roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml @@ -0,0 +1,53 @@ +apiVersion: "v1" +kind: "List" +items: + - + apiVersion: "v1" + kind: "Service" + metadata: + labels: + provider: "fabric8" + component: "grafana" + name: "grafana" + spec: + ports: + - + port: 80 + targetPort: "http" + selector: + provider: "fabric8" + component: "grafana" + - + apiVersion: "v1" + kind: "ReplicationController" + metadata: + labels: + provider: "fabric8" + component: "grafana" + name: "grafana" + spec: + replicas: 1 + selector: + provider: "fabric8" + component: "grafana" + template: + metadata: + labels: + provider: "fabric8" + component: "grafana" + spec: + containers: + - + env: + - + name: "INFLUXDB_SERVICE_NAME" + value: "INFLUXDB_MONITORING" + - + name: "GRAFANA_DEFAULT_DASHBOARD" + value: "/dashboard/file/kubernetes.json" + image: "fabric8/grafana:1.9.1_2" + name: "grafana" + ports: + - + containerPort: 3000 + name: "http" \ No newline at end of file diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml new file mode 100644 index 000000000..1de2ad699 --- /dev/null +++ b/roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: heapster \ No newline at end of file diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml new file mode 100644 index 000000000..83e314074 --- /dev/null +++ 
b/roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml @@ -0,0 +1,30 @@ +apiVersion: "v1" +kind: "List" +items: + - + apiVersion: "v1" + kind: "ReplicationController" + metadata: + labels: + provider: "fabric8" + component: "heapster" + name: "heapster" + spec: + replicas: 1 + selector: + provider: "fabric8" + component: "heapster" + template: + metadata: + labels: + provider: "fabric8" + component: "heapster" + spec: + containers: + - + args: + - "-source=kubernetes:https://kubernetes.default.svc.cluster.local?auth=&insecure=true&useServiceAccount=true" + - "-sink=influxdb:http://influxdb-monitoring.default.svc.cluster.local:8086" + image: "kubernetes/heapster:V0.14.2" + name: "heapster" + serviceAccount: "heapster" \ No newline at end of file diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml new file mode 100644 index 000000000..6f67c3d7c --- /dev/null +++ b/roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml @@ -0,0 +1,67 @@ +apiVersion: "v1" +kind: "List" +items: + - + apiVersion: "v1" + kind: "Service" + metadata: + labels: + provider: "fabric8" + component: "influxdb-monitoring" + name: "influxdb-monitoring" + spec: + ports: + - + port: 8086 + targetPort: "http" + selector: + provider: "fabric8" + component: "influxdb-monitoring" + - + apiVersion: "v1" + kind: "ReplicationController" + metadata: + labels: + provider: "fabric8" + component: "influxdb-monitoring" + name: "influxdb-monitoring" + spec: + replicas: 1 + selector: + provider: "fabric8" + component: "influxdb-monitoring" + template: + metadata: + labels: + provider: "fabric8" + component: "influxdb-monitoring" + spec: + containers: + - + env: + - + name: "PRE_CREATE_DB" + value: "k8s;grafana" + image: "fabric8/influxdb:0.8.8" + name: "influxdb" + ports: + - + containerPort: 8090 + name: "raft" + - + containerPort: 8099 + name: "protobuf" + - + containerPort: 8083 + name: "admin" + - + containerPort: 8086 + name: "http" + volumeMounts: + - + mountPath: "/data" + name: "influxdb-data" + volumes: + - + emptyDir: + name: "influxdb-data" \ No newline at end of file diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml new file mode 100644 index 000000000..3938aba4c --- /dev/null +++ b/roles/openshift_cluster_metrics/tasks/main.yml @@ -0,0 +1,50 @@ +--- + +- name: Install cluster metrics templates + copy: + src: cluster-metrics + dest: /etc/openshift/ + +- name: Create InfluxDB Services + command: > + {{ openshift.common.client_binary }} create -f + /etc/openshift/cluster-metrics/influxdb.yaml + register: oex_influxdb_services + failed_when: "'already exists' not in oex_influxdb_services.stderr and oex_influxdb_services.rc != 0" + changed_when: false + +- name: Create Heapster Service Account + command: > + {{ openshift.common.client_binary }} create -f + /etc/openshift/cluster-metrics/heapster-serviceaccount.yaml + register: oex_heapster_serviceaccount + failed_when: "'already exists' not in oex_heapster_serviceaccount.stderr and oex_heapster_serviceaccount.rc != 0" + changed_when: false + +- name: Add cluster-reader role to Heapster + command: > + {{ openshift.common.admin_binary }} policy + add-cluster-role-to-user + cluster-reader + system:serviceaccount:default:heapster + register: oex_cluster_header_role + register: oex_cluster_header_role + failed_when: "'already exists' not in oex_cluster_header_role.stderr and oex_cluster_header_role.rc 
!= 0" + changed_when: false + +- name: Create Heapster Services + command: > + {{ openshift.common.client_binary }} create -f + /etc/openshift/cluster-metrics/heapster.yaml + register: oex_heapster_services + failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0" + changed_when: false + +- name: Create Grafana Services + command: > + {{ openshift.common.client_binary }} create -f + /etc/openshift/cluster-metrics/grafana.yaml + register: oex_grafana_services + failed_when: "'already exists' not in oex_grafana_services.stderr and oex_grafana_services.rc != 0" + changed_when: false + diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 4e0989c5f..c1c4e1b5c 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -323,6 +323,24 @@ def set_fluentd_facts_if_unset(facts): facts['common']['use_fluentd'] = use_fluentd return facts +def set_cluster_metrics_facts_if_unset(facts): + """ Set cluster metrics facts if not already present in facts dict + dict: the facts dict updated with the generated cluster metrics facts if + missing + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the generated cluster metrics + facts if they were not already present + + """ + if 'common' in facts: + deployment_type = facts['common']['deployment_type'] + if 'use_cluster_metrics' not in facts['common']: + use_cluster_metrics = True if deployment_type == 'origin' else False + facts['common']['use_cluster_metrics'] = use_cluster_metrics + return facts + def set_identity_providers_if_unset(facts): """ Set identity_providers fact if not already present in facts dict @@ -700,6 +718,7 @@ class OpenShiftFacts(object): facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_fluentd_facts_if_unset(facts) + facts = set_cluster_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = set_registry_url_if_unset(facts) facts = set_sdn_facts_if_unset(facts) diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index be51195f2..1dbcc4301 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -6,3 +6,7 @@ os_firewall_allow: port: 80/tcp - service: https port: 443/tcp +- service: Openshift kubelet ReadOnlyPort + port: 10255/tcp +- service: Openshift kubelet ReadOnlyPort udp + port: 10255/udp diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 7778a2a61..a0a7e5098 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -18,3 +18,4 @@ servingInfo: clientCA: ca.crt keyFile: server.key volumeDirectory: {{ openshift_data_dir }}/openshift.local.volumes +{% include 'partials/kubeletArguments.j2' %} \ No newline at end of file diff --git a/roles/openshift_node/templates/partials/kubeletArguments.j2 b/roles/openshift_node/templates/partials/kubeletArguments.j2 new file mode 100644 index 000000000..6c3bd04c5 --- /dev/null +++ b/roles/openshift_node/templates/partials/kubeletArguments.j2 @@ -0,0 +1,5 @@ +{% if openshift.common.use_cluster_metrics | bool %} +kubeletArguments: + "read-only-port": + - "10255" +{% endif %} \ No newline at end of file -- cgit v1.2.3 From 65f9922028595c36eb10c8f43b4db51817d64c32 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 
13 Aug 2015 16:15:44 -0400 Subject: Fix for node labeling where internal node name != inventory_hostname --- playbooks/common/openshift-node/config.yml | 3 ++- roles/openshift_manage_node/tasks/main.yml | 7 +++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 4010b4c9e..705f7f223 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -128,9 +128,10 @@ vars: openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift.common.hostname') }}" + | oo_collect('openshift.common.hostname') }}" openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([])) | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" + openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" pre_tasks: - set_fact: openshift_scheduleable_nodes: "{{ hostvars diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 472d63efe..cbf1c667f 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -19,8 +19,7 @@ - name: Label nodes command: > - {{ openshift.common.client_binary }} label --overwrite node {{ item }} {{ hostvars[item]['openshift_node_labels'] | oo_combine_dict }} + {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }} with_items: - - "{{ openshift_nodes }}" - when: - "'openshift_node_labels' in hostvars[item]" + - "{{ openshift_node_vars }}" + when: "'labels' in item.openshift.node" -- cgit v1.2.3 From 29f4037106ac8ada0955f5c1f309b5de3e0e94ea Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Fri, 14 Aug 2015 16:09:51 -0400 Subject: Update instance sizes for online --- playbooks/aws/openshift-cluster/vars.online.int.yml | 4 ++-- playbooks/aws/openshift-cluster/vars.online.prod.yml | 4 ++-- playbooks/aws/openshift-cluster/vars.online.stage.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml index b9ee29b83..bb18e13b0 100644 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -3,9 +3,9 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m4.large +ec2_master_instance_type: t2.small ec2_master_security_groups: [ 'integration', 'integration-master' ] -ec2_infra_instance_type: m4.large +ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'integration', 'integration-infra' ] ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'integration', 'integration-node' ] diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml index 691582834..bbef9cc56 100644 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -3,9 +3,9 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m4.large +ec2_master_instance_type: t2.small ec2_master_security_groups: [ 'production', 'production-master' ] 
-ec2_infra_instance_type: m4.large +ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'production', 'production-infra' ] ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'production', 'production-node' ] diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml index 2ec43ad4c..9008a55ba 100644 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -3,9 +3,9 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m4.large +ec2_master_instance_type: t2.small ec2_master_security_groups: [ 'stage', 'stage-master' ] -ec2_infra_instance_type: m4.large +ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'stage', 'stage-infra' ] ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'stage', 'stage-node' ] -- cgit v1.2.3 From b9606a11fe875d9151a0238bc45f149e1cbe819c Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Mon, 17 Aug 2015 10:43:49 +0200 Subject: Properly pass the "external network" option to the HEAT template Fixes #471 --- README_openstack.md | 12 +++---- .../openshift-cluster/files/heat_stack.yaml | 42 ++++++++++++---------- playbooks/openstack/openshift-cluster/launch.yml | 33 ++++++----------- .../tasks/configure_openstack.yml | 27 -------------- playbooks/openstack/openshift-cluster/vars.yml | 8 ++--- 5 files changed, 43 insertions(+), 79 deletions(-) delete mode 100644 playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml (limited to 'playbooks') diff --git a/README_openstack.md b/README_openstack.md index 3076e7b08..8d8f6ef3f 100644 --- a/README_openstack.md +++ b/README_openstack.md @@ -25,20 +25,20 @@ Configuration The following options can be passed via the `-o` flag of the `create` command: -* `image_name`: Name of the image to use to spawn VMs -* `keypair` (default to `${LOGNAME}_key`): Name of the ssh key -* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key -* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master -* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the nodes * `infra_heat_stack` (default to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the HEAT template to use to create the cluster infrastructure The following options are used only by `heat_stack.yaml`. They are so used only if the `infra_heat_stack` option is left with its default value. 
+* `image_name`: Name of the image to use to spawn VMs +* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key +* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master +* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the compute nodes +* `infra_flavor` (default to `m1.small`): The ID or name of the flavor for the infrastructure nodes * `network_prefix` (default to `openshift-ansible-`): prefix prepended to all network objects (net, subnet, router, security groups) * `dns` (default to `8.8.8.8,8.8.4.4`): comma separated list of DNS to use * `net_cidr` (default to `192.168..0/24`): CIDR of the network created by `heat_stack.yaml` * `external_net` (default to `external`): Name of the external network to connect to -* `floating_ip_pools` (default to `external`): comma separated list of floating IP pools +* `floating_ip_pool` (default to `external`): comma separated list of floating IP pools * `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index d53884e0d..40e4ab22c 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -9,21 +9,6 @@ parameters: label: Cluster ID description: Identifier of the cluster - num_masters: - type: number - label: Number of masters - description: Number of masters - - num_nodes: - type: number - label: Number of compute nodes - description: Number of compute nodes - - num_infra: - type: number - label: Number of infrastructure nodes - description: Number of infrastructure nodes - cidr: type: string label: CIDR @@ -40,6 +25,12 @@ parameters: description: Name of the external network default: external + floating_ip_pool: + type: string + label: Floating IP pool + description: Floating IP pools + default: external + ssh_public_key: type: string label: SSH public key @@ -52,6 +43,21 @@ parameters: description: Source of legitimate ssh connections default: 0.0.0.0/0 + num_masters: + type: number + label: Number of masters + description: Number of masters + + num_nodes: + type: number + label: Number of compute nodes + description: Number of compute nodes + + num_infra: + type: number + label: Number of infrastructure nodes + description: Number of infrastructure nodes + master_image: type: string label: Master image @@ -290,7 +296,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: master-secgrp } - floating_network: { get_param: external_net } + floating_network: { get_param: floating_ip_pool } net_name: str_replace: template: openshift-ansible-cluster_id-net @@ -322,7 +328,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: node-secgrp } - floating_network: { get_param: external_net } + floating_network: { get_param: floating_ip_pool } net_name: str_replace: template: openshift-ansible-cluster_id-net @@ -355,7 +361,7 @@ resources: secgrp: - { get_resource: node-secgrp } - { get_resource: infra-secgrp } - floating_network: { get_param: external_net } + floating_network: { get_param: floating_ip_pool } net_name: str_replace: template: openshift-ansible-cluster_id-net diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index d36bdbf26..651aef40b 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ 
b/playbooks/openstack/openshift-cluster/launch.yml @@ -19,30 +19,21 @@ changed_when: false failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - name: Create OpenStack Stack - command: 'heat stack-create -f {{ openstack_infra_heat_stack }} - -P cluster_id={{ cluster_id }} - -P dns_nameservers={{ openstack_network_dns | join(",") }} - -P cidr={{ openstack_network_cidr }} - -P ssh_incoming={{ openstack_ssh_access_from }} - -P num_masters={{ num_masters }} - -P num_nodes={{ num_nodes }} - -P num_infra={{ num_infra }} - -P master_image={{ deployment_vars[deployment_type].image }} - -P node_image={{ deployment_vars[deployment_type].image }} - -P infra_image={{ deployment_vars[deployment_type].image }} - -P master_flavor={{ openstack_flavor["master"] }} - -P node_flavor={{ openstack_flavor["node"] }} - -P infra_flavor={{ openstack_flavor["infra"] }} - -P ssh_public_key="{{ openstack_ssh_public_key }}" - openshift-ansible-{{ cluster_id }}-stack' + - set_fact: + heat_stack_action: 'stack-create' when: stack_show_result.rc == 1 + - set_fact: + heat_stack_action: 'stack-update' + when: stack_show_result.rc == 0 - - name: Update OpenStack Stack - command: 'heat stack-update -f {{ openstack_infra_heat_stack }} + - name: Create or Update OpenStack Stack + command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} -P cluster_id={{ cluster_id }} - -P dns_nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} + -P dns_nameservers={{ openstack_network_dns | join(",") }} + -P external_net={{ openstack_network_external_net }} + -P floating_ip_pool={{ openstack_floating_ip_pool }} + -P ssh_public_key="{{ openstack_ssh_public_key }}" -P ssh_incoming={{ openstack_ssh_access_from }} -P num_masters={{ num_masters }} -P num_nodes={{ num_nodes }} @@ -53,9 +44,7 @@ -P master_flavor={{ openstack_flavor["master"] }} -P node_flavor={{ openstack_flavor["node"] }} -P infra_flavor={{ openstack_flavor["infra"] }} - -P ssh_public_key="{{ openstack_ssh_public_key }}" openshift-ansible-{{ cluster_id }}-stack' - when: stack_show_result.rc == 0 - name: Wait for OpenStack Stack readiness shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' diff --git a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml b/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml deleted file mode 100644 index 2cbdb4805..000000000 --- a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Check infra - command: 'heat stack-show {{ openstack_network_prefix }}-stack' - register: stack_show_result - changed_when: false - failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - -- name: Create infra - command: 'heat stack-create -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack' - when: stack_show_result.rc == 1 - -- name: Update infra - command: 'heat stack-update -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ 
openstack_network_prefix }}-stack' - when: stack_show_result.rc == 0 - -- name: Wait for infra readiness - shell: 'heat stack-show {{ openstack_network_prefix }}-stack | awk ''$2 == "stack_status" {print $4}''' - register: stack_show_status_result - until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 30 - delay: 1 - failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - -- name: Create ssh keypair - nova_keypair: - name: "{{ openstack_ssh_keypair }}" - public_key: "{{ openstack_ssh_public_key }}" diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index 43e25f2e6..262d3f4ed 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -1,18 +1,14 @@ --- openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | default('files/heat_stack.yaml', True) }}" -openstack_network_prefix: "{{ lookup('oo_option', 'network_prefix' ) | - default('openshift-ansible-'+cluster_id, True) }}" openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) | default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}" openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) | default('external', True) }}" -openstack_floating_ip_pools: "{{ lookup('oo_option', 'floating_ip_pools') | - default('external', True) | oo_split() }}" +openstack_floating_ip_pool: "{{ lookup('oo_option', 'floating_ip_pool' ) | + default('external', True) }}" openstack_network_dns: "{{ lookup('oo_option', 'dns' ) | default('8.8.8.8,8.8.4.4', True) | oo_split() }}" -openstack_ssh_keypair: "{{ lookup('oo_option', 'keypair' ) | - default(lookup('env', 'LOGNAME')+'_key', True) }}" openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') | default('~/.ssh/id_rsa.pub', True)) }}" openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') | -- cgit v1.2.3 From 69f6fd410500a3dd20a97a9e3dad860761b09ac8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 2 Jul 2015 11:59:22 -0400 Subject: playbooks/adhoc: Add a tutorial-reset playbook to undo everything This makes it easier to run through the tutorial, as well as reset a VM or baremetal node to a clean slate for developer testing. --- playbooks/adhoc/tutorial-reset.yml | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 playbooks/adhoc/tutorial-reset.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/tutorial-reset.yml b/playbooks/adhoc/tutorial-reset.yml new file mode 100644 index 000000000..1ceb72d19 --- /dev/null +++ b/playbooks/adhoc/tutorial-reset.yml @@ -0,0 +1,46 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. It is primarily intended for use +# with the tutorial as well as for developers to reset state. 
+ +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - docker + - atomic-enterprise-master + - atomic-enterprise-node + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - file: path={{ item }} state=absent + with_items: + - /var/lib/atomic-enterprise + - /etc/sysconfig/atomic-enterprise + - /etc/atomic-enterprise + - /etc/openshift + - /var/lib/docker + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe -- cgit v1.2.3 From c85b503d6f02514beb9ea73c6a12fe2ef1bfb25a Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Wed, 12 Aug 2015 09:48:44 -0400 Subject: Added /root/.kube to be deleted so that the stuff there does not prevent a new install. --- playbooks/adhoc/tutorial-reset.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/tutorial-reset.yml b/playbooks/adhoc/tutorial-reset.yml index 1ceb72d19..77bc13b17 100644 --- a/playbooks/adhoc/tutorial-reset.yml +++ b/playbooks/adhoc/tutorial-reset.yml @@ -39,6 +39,7 @@ - /etc/atomic-enterprise - /etc/openshift - /var/lib/docker + - /root/.kube - user: name={{ item }} state=absent remove=yes with_items: -- cgit v1.2.3 From 472ecf8ac4bd63556b91b70a779e2e738546f77c Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Thu, 13 Aug 2015 18:28:14 -0400 Subject: Renamed the file as it mainly applies to atomic enterprise. --- .../adhoc/atomic_enterprise_tutorial_reset.yml | 47 ++++++++++++++++++++++ playbooks/adhoc/tutorial-reset.yml | 47 ---------------------- 2 files changed, 47 insertions(+), 47 deletions(-) create mode 100644 playbooks/adhoc/atomic_enterprise_tutorial_reset.yml delete mode 100644 playbooks/adhoc/tutorial-reset.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml b/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml new file mode 100644 index 000000000..77bc13b17 --- /dev/null +++ b/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml @@ -0,0 +1,47 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. It is primarily intended for use +# with the tutorial as well as for developers to reset state. 
+ +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - docker + - atomic-enterprise-master + - atomic-enterprise-node + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - file: path={{ item }} state=absent + with_items: + - /var/lib/atomic-enterprise + - /etc/sysconfig/atomic-enterprise + - /etc/atomic-enterprise + - /etc/openshift + - /var/lib/docker + - /root/.kube + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe diff --git a/playbooks/adhoc/tutorial-reset.yml b/playbooks/adhoc/tutorial-reset.yml deleted file mode 100644 index 77bc13b17..000000000 --- a/playbooks/adhoc/tutorial-reset.yml +++ /dev/null @@ -1,47 +0,0 @@ -# This deletes *ALL* Docker images, and uninstalls OpenShift and -# Atomic Enterprise RPMs. It is primarily intended for use -# with the tutorial as well as for developers to reset state. - -- hosts: - - OSEv3:children - - sudo: yes - - tasks: - - service: name={{ item }} state=stopped - with_items: - - docker - - atomic-enterprise-master - - atomic-enterprise-node - - - yum: name={{ item }} state=absent - with_items: - - openvswitch - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - tuned-profiles-atomic-enterprise-node - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - file: path={{ item }} state=absent - with_items: - - /var/lib/atomic-enterprise - - /etc/sysconfig/atomic-enterprise - - /etc/atomic-enterprise - - /etc/openshift - - /var/lib/docker - - /root/.kube - - - user: name={{ item }} state=absent remove=yes - with_items: - - alice - - joe -- cgit v1.2.3 From 008aa1b39a8c27cf227c87cdf225182a18a992e6 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Fri, 14 Aug 2015 17:26:45 -0400 Subject: Updated tutorial reset file and made following chages: 1. Included openshift clean up 2. Renamed file to atomic_openshift_tutorial_reset.yml 3. docker service is not not stopped 4. docker containers and images are removed 5. /etc/openshift-sdn are removed too now --- .../adhoc/atomic_enterprise_tutorial_reset.yml | 47 --------------- .../adhoc/atomic_openshift_tutorial_reset.yml | 68 ++++++++++++++++++++++ 2 files changed, 68 insertions(+), 47 deletions(-) delete mode 100644 playbooks/adhoc/atomic_enterprise_tutorial_reset.yml create mode 100644 playbooks/adhoc/atomic_openshift_tutorial_reset.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml b/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml deleted file mode 100644 index 77bc13b17..000000000 --- a/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml +++ /dev/null @@ -1,47 +0,0 @@ -# This deletes *ALL* Docker images, and uninstalls OpenShift and -# Atomic Enterprise RPMs. It is primarily intended for use -# with the tutorial as well as for developers to reset state. 
- -- hosts: - - OSEv3:children - - sudo: yes - - tasks: - - service: name={{ item }} state=stopped - with_items: - - docker - - atomic-enterprise-master - - atomic-enterprise-node - - - yum: name={{ item }} state=absent - with_items: - - openvswitch - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - tuned-profiles-atomic-enterprise-node - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - file: path={{ item }} state=absent - with_items: - - /var/lib/atomic-enterprise - - /etc/sysconfig/atomic-enterprise - - /etc/atomic-enterprise - - /etc/openshift - - /var/lib/docker - - /root/.kube - - - user: name={{ item }} state=absent remove=yes - with_items: - - alice - - joe diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml new file mode 100644 index 000000000..91159ad8e --- /dev/null +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -0,0 +1,68 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. It is primarily intended for use +# with the tutorial as well as for developers to reset state. + +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - openshift-master + - openshift-node + - openvswitch + - atomic-enterprise-master + - atomic-enterprise-node + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - openshift + - openshift-master + - openshift-node + - openshift-sdn-ovs + - tuned-profiles-openshift-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: docker ps -a -q | xargs docker stop + changed_when: False + + - shell: docker ps -a -q| xargs docker rm + changed_when: False + + - shell: docker images -q |xargs docker rmi + changed_when: False + + - file: path={{ item }} state=absent + with_items: + - /var/lib/atomic-enterprise + - /etc/sysconfig/atomic-enterprise + - /etc/atomic-enterprise + - /etc/openshift + - /etc/openshift-sdn + - /root/.kube + - /etc/sysconfig/openshift + - /var/lib/openshift + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe -- cgit v1.2.3 From 0e94fa986dd928888c36d2fbef71359c0b9b05d2 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Mon, 17 Aug 2015 11:01:41 -0400 Subject: Updated to include origin and atomic-openshift RPMs re-factoring to include all origin, AE and openshift products. For back-word compatibility, older openshift and AE naming is retained too. 
--- .../adhoc/atomic_openshift_tutorial_reset.yml | 37 ++++++++++++++++++---- 1 file changed, 31 insertions(+), 6 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 91159ad8e..1200caa2a 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -10,15 +10,29 @@ tasks: - service: name={{ item }} state=stopped with_items: + - openvswitch + - origin-master + - origin-node + - atomic-openshift-master + - atomic-openshift-node - openshift-master - openshift-node - - openvswitch - atomic-enterprise-master - atomic-enterprise-node - yum: name={{ item }} state=absent with_items: - openvswitch + - origin + - origin-master + - origin-node + - origin-sdn-ovs + - tuned-profiles-origin-node + - atomic-openshift + - atomic-openshift-master + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - tuned-profiles-atomic-openshift-node - atomic-enterprise - atomic-enterprise-master - atomic-enterprise-node @@ -36,6 +50,9 @@ - shell: systemctl daemon-reload changed_when: False + - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true changed_when: False @@ -53,14 +70,22 @@ - file: path={{ item }} state=absent with_items: - - /var/lib/atomic-enterprise - - /etc/sysconfig/atomic-enterprise - - /etc/atomic-enterprise - - /etc/openshift - /etc/openshift-sdn - /root/.kube - - /etc/sysconfig/openshift + - /etc/origin + - /etc/atomic-enterprise + - /etc/openshift + - /var/lib/origin - /var/lib/openshift + - /var/lib/atomic-enterprise + - /etc/sysconfig/origin-master + - /etc/sysconfig/origin-node + - /etc/sysconfig/atomic-openshift-master + - /etc/sysconfig/atomic-openshift-node + - /etc/sysconfig/openshift-master + - /etc/sysconfig/openshift-node + - /etc/sysconfig/atomic-enterprise-master + - /etc/sysconfig/atomic-enterprise-node - user: name={{ item }} state=absent remove=yes with_items: -- cgit v1.2.3 From 3c3669ccf9bacd69a222cdb45a0c377da0ce090a Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 19 Aug 2015 13:21:20 -0400 Subject: remove fstab entry after pv creation --- playbooks/adhoc/create_pv/create_pv.yaml | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 684a0ca72..591b1d902 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -118,6 +118,13 @@ state: unmounted fstype: ext4 + - name: remove from fstab + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + state: absent + fstype: ext4 + - name: detach drive delegate_to: localhost ec2_vol: -- cgit v1.2.3 From 0dc89f3583a5e88e1ca66780e974bc9520910410 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 19 Aug 2015 17:20:26 -0400 Subject: Added tagging to the pv volumes --- playbooks/adhoc/create_pv/create_pv.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 591b1d902..4f0ef7a75 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -50,6 +50,16 @@ - debug: var=vol + - name: tag the vol with a name + ec2_tag: region={{ 
hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}} + args: + tags: + Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}" + env: "{{cli_environment}}" + register: voltags + + - debug: var=voltags + - name: Configure the drive gather_facts: no hosts: oo_master -- cgit v1.2.3 From 49923edfba6d396140881d6a920e83f9ecf79f77 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Thu, 20 Aug 2015 11:44:27 -0400 Subject: fixed zbx_user. Update password playbook added --- playbooks/adhoc/zabbix_setup/create_user.yml | 31 ++++++++++++++++++++++++++++ roles/os_zabbix/library/zbx_user.py | 13 ++++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 playbooks/adhoc/zabbix_setup/create_user.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/zabbix_setup/create_user.yml b/playbooks/adhoc/zabbix_setup/create_user.yml new file mode 100644 index 000000000..dd74798b7 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/create_user.yml @@ -0,0 +1,31 @@ +--- +# export PYTHONPATH='/usr/lib/python2.7/site-packages/:/home/kwoodson/git/openshift-tools' +# ansible-playbook -e 'cli_password=zabbix' -e 'cli_new_password=new-zabbix' create_user.yml +- hosts: localhost + gather_facts: no + vars_files: + - vars/template_heartbeat.yml + - vars/template_os_linux.yml + vars: + g_zserver: http://localhost/zabbix/api_jsonrpc.php + g_zuser: admin + g_zpassword: "{{ cli_password }}" + roles: + - ../../../roles/os_zabbix + post_tasks: + - zbx_user: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + register: users + + - debug: var=users + + - name: Update zabbix creds for admin + zbx_user: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + alias: Admin + passwd: "{{ cli_new_password | default(g_zpassword, true) }}" diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py index 50f6fc075..c45c9a75d 100644 --- a/roles/os_zabbix/library/zbx_user.py +++ b/roles/os_zabbix/library/zbx_user.py @@ -54,13 +54,15 @@ def get_usergroups(zapi, usergroups): if content['result']: ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) - return ugroups - + return ugroups or None def get_usertype(user_type): ''' Determine zabbix user account type ''' + if not user_type: + return None + utype = 1 if 'super' in user_type: utype = 3 @@ -84,9 +86,9 @@ def main(): alias=dict(default=None, type='str'), name=dict(default=None, type='str'), surname=dict(default=None, type='str'), - user_type=dict(default='user', type='str'), + user_type=dict(default=None, type='str'), passwd=dict(default=None, type='str'), - usergroups=dict(default=None, type='list'), + usergroups=dict(default=[], type='list'), debug=dict(default=False, type='bool'), state=dict(default='present', type='str'), ), @@ -129,6 +131,9 @@ def main(): 'type': get_usertype(module.params['user_type']), } + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + if not exists(content): # if we didn't find it, create it content = zapi.get_content(zbx_class_name, 'create', params) -- cgit v1.2.3 From ced2ad4551632d93d6a17391913effefe67607b4 Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Tue, 25 Aug 2015 17:04:02 +0200 Subject: Add etcd nodes management in libvirt --- playbooks/libvirt/openshift-cluster/launch.yml | 8 ++++++++ playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 5 +++-- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git 
a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 830f9d216..d3e768de5 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -17,6 +17,14 @@ - include: tasks/configure_libvirt.yml + - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ etcd_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "default" + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml - include: tasks/launch_instances.yml vars: diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 4cb494056..2a0c90b46 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -63,8 +63,9 @@ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}''' register: nb_allocated_ips until: nb_allocated_ips.stdout == '{{ instances | length }}' - retries: 30 + retries: 60 delay: 1 + when: instances | length != 0 - name: Collect IP addresses of the VMs shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}''' @@ -72,7 +73,7 @@ with_items: instances - set_fact: - ips: "{{ scratch_ip.results | oo_collect('stdout') }}" + ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}" - name: Add new instances add_host: -- cgit v1.2.3 From 245bf785df17941e851e2ac7d3916e1159ddff23 Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Tue, 25 Aug 2015 17:22:39 -0400 Subject: Set node labels for AWS hosts --- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 236d84e74..e9ebc3e02 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -147,6 +147,18 @@ tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, tag_sub-host-type_{{ sub_host_type }}" +- set_fact: + node_label: + region: "{{ec2_region}}" + type: "{{sub_host_type}}" + when: host_type == "node" + +- set_fact: + node_label: + region: "{{ec2_region}}" + type: "{{host_type}}" + when: host_type != "node" + - name: Add new instances groups and variables add_host: hostname: "{{ item.0 }}" @@ -156,6 +168,7 @@ groups: "{{ instance_groups }}" ec2_private_ip_address: "{{ item.1.private_ip }}" ec2_ip_address: "{{ item.1.public_ip }}" + openshift_node_labels: "{{ node_label }}" with_together: - instances - ec2.instances -- cgit v1.2.3 From 1b3fff6248fbd6788a26ee2b6c60f7731891c0f4 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Fri, 19 Jun 2015 14:41:10 -0400 Subject: Atomic Enterprise related changes. 
--- DEPLOYMENT_TYPES.md | 23 +++++++ bin/cluster | 4 ++ inventory/byo/hosts.example | 6 +- playbooks/byo/openshift_facts.yml | 2 +- playbooks/common/openshift-master/config.yml | 17 ++--- playbooks/common/openshift-master/service.yml | 4 +- playbooks/common/openshift-node/config.yml | 10 +-- playbooks/common/openshift-node/service.yml | 4 +- roles/openshift_common/tasks/main.yml | 2 +- roles/openshift_common/vars/main.yml | 2 +- roles/openshift_facts/library/openshift_facts.py | 80 ++++++++++++++-------- roles/openshift_facts/tasks/main.yml | 2 +- roles/openshift_master/README.md | 10 +-- roles/openshift_master/defaults/main.yml | 10 +-- roles/openshift_master/handlers/main.yml | 4 +- roles/openshift_master/meta/main.yml | 2 +- roles/openshift_master/tasks/main.yml | 55 ++++++++++----- roles/openshift_master/vars/main.yml | 2 +- roles/openshift_master_ca/tasks/main.yml | 4 +- roles/openshift_master_ca/vars/main.yml | 2 +- roles/openshift_master_certificates/vars/main.yml | 4 +- roles/openshift_master_cluster/tasks/configure.yml | 8 +-- .../tasks/configure_deferred.yml | 4 +- roles/openshift_node/README.md | 16 ++--- roles/openshift_node/defaults/main.yml | 2 +- roles/openshift_node/handlers/main.yml | 4 +- roles/openshift_node/tasks/main.yml | 34 ++++----- roles/openshift_node/vars/main.yml | 2 +- roles/openshift_node_certificates/README.md | 4 +- roles/openshift_node_certificates/vars/main.yml | 6 +- roles/openshift_registry/vars/main.yml | 3 +- roles/openshift_repos/vars/main.yml | 7 +- roles/openshift_router/vars/main.yml | 3 +- roles/openshift_storage_nfs_lvm/tasks/main.yml | 2 +- 34 files changed, 208 insertions(+), 136 deletions(-) create mode 100644 DEPLOYMENT_TYPES.md (limited to 'playbooks') diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md new file mode 100644 index 000000000..1f64e223a --- /dev/null +++ b/DEPLOYMENT_TYPES.md @@ -0,0 +1,23 @@ +#Deployment Types + +This module supports OpenShift Origin, OpenShift Enterprise, and Atomic +Enterprise Platform. Each deployment type sets various defaults used throughout +your environment. + +The table below outlines the defaults per `deployment_type`. + +| deployment_type | origin | enterprise (< 3.1) | atomic-enterprise | openshift-enterprise (>= 3.1) | +|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|----------------------------------| +| **openshift.common.service_type** (also used for package names) | origin | openshift | atomic-openshift | | +| **openshift.common.config_base** | /etc/origin | /etc/openshift | /etc/origin | /etc/origin | +| **openshift.common.data_dir** | /var/lib/origin | /var/lib/openshift | /var/lib/origin | /var/lib/origin | +| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | aos3/aos-${component}:${version} | +| **Image Streams** | centos | rhel + xpaas | N/A | rhel | + + +**NOTE** `enterprise` deloyment type is used for OpenShift Enterprise version +3.0.x OpenShift Enterprise deployments utilizing version 3.1 and later will +make use of the new `openshift-enterprise` deployment type. Additional work to +migrate between the two will be forthcoming. 
+ + diff --git a/bin/cluster b/bin/cluster index c80fe0cab..486bf2a48 100755 --- a/bin/cluster +++ b/bin/cluster @@ -48,6 +48,7 @@ class Cluster(object): deployment_type = os.environ['OS_DEPLOYMENT_TYPE'] return deployment_type + def create(self, args): """ Create an OpenShift cluster for given provider @@ -258,6 +259,9 @@ if __name__ == '__main__': meta_parser.add_argument('-t', '--deployment-type', choices=['origin', 'online', 'enterprise'], help='Deployment type. (default: origin)') + meta_parser.add_argument('-T', '--product-type', + choices=['openshift' 'atomic-enterprise'], + help='Product type. (default: openshift)') meta_parser.add_argument('-o', '--option', action='append', help='options') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 16b89d458..c0469c3b3 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -18,7 +18,7 @@ ansible_ssh_user=root #ansible_sudo=true # deployment type valid values are origin, online and enterprise -deployment_type=enterprise +deployment_type=atomic-enterprise # Enable cluster metrics #use_cluster_metrics=true @@ -52,7 +52,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # For installation the value of openshift_master_cluster_hostname must resolve # to the first master defined in the inventory. # The HA solution must be manually configured after installation and must ensure -# that openshift-master is running on a single master host. +# that the master is running on a single master host. #openshift_master_cluster_hostname=openshift-ansible.test.example.com #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com #openshift_master_cluster_defer_ha=True @@ -61,7 +61,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #osm_default_subdomain=apps.test.example.com # additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] # default project node selector #osm_default_node_selector='region=primary' diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index cd282270f..6d7c12fd4 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -1,5 +1,5 @@ --- -- name: Gather OpenShift facts +- name: Gather Cluster facts hosts: all gather_facts: no roles: diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index acf85fc04..5a179f791 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -37,7 +37,7 @@ public_console_url: "{{ openshift_master_public_console_url | default(None) }}" - name: Check status of external etcd certificatees stat: - path: "/etc/openshift/master/{{ item }}" + path: "{{ openshift.common.config_base }}/master/{{ item }}" with_items: - master.etcd-client.crt - master.etcd-ca.crt @@ -47,7 +47,7 @@ | map(attribute='stat.exists') | list | intersect([false])}}" etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }} - etcd_cert_config_dir: /etc/openshift/master + etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: master.etcd- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config @@ -96,7 +96,7 @@ tasks: - name: Ensure certificate directory exists file: - path: /etc/openshift/master + path: "{{ openshift.common.config_base }}/master" state: directory when: etcd_client_certs_missing is 
defined and etcd_client_certs_missing - name: Unarchive the tarball on the master @@ -134,7 +134,7 @@ - name: Check status of master certificates stat: - path: "/etc/openshift/master/{{ item }}" + path: "{{ openshift.common.config_base }}/master/{{ item }}" with_items: openshift_master_certs register: g_master_cert_stat_result - set_fact: @@ -142,12 +142,12 @@ | map(attribute='stat.exists') | list | intersect([false])}}" master_cert_subdir: master-{{ openshift.common.hostname }} - master_cert_config_dir: /etc/openshift/master + master_cert_config_dir: "{{ openshift.common.config_base }}/master" - name: Configure master certificates hosts: oo_first_master vars: - master_generated_certs_dir: /etc/openshift/generated-configs + master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs" masters_needing_certs: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master'])) | oo_filter_list(filter_attr='master_certs_missing') }}" @@ -189,7 +189,7 @@ pre_tasks: - name: Ensure certificate directory exists file: - path: /etc/openshift/master + path: "{{ openshift.common.config_base }}/master" state: directory when: master_certs_missing and 'oo_first_master' not in group_names - name: Unarchive the tarball on the master @@ -214,7 +214,8 @@ roles: - role: openshift_master_cluster when: openshift_master_ha | bool - - openshift_examples + - role: openshift_examples + when: deployment_type in ['enterprise','openshift-enterprise','origin'] - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml index 5636ad156..27e1e66f9 100644 --- a/playbooks/common/openshift-master/service.yml +++ b/playbooks/common/openshift-master/service.yml @@ -10,9 +10,9 @@ add_host: name={{ item }} groups=g_service_masters with_items: oo_host_group_exp | default([]) -- name: Change openshift-master state on master instance(s) +- name: Change state on master instance(s) hosts: g_service_masters connection: ssh gather_facts: no tasks: - - service: name=openshift-master state="{{ new_cluster_state }}" + - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 705f7f223..c6d19d131 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -22,7 +22,7 @@ annotations: "{{ openshift_node_annotations | default(None) }}" - name: Check status of node certificates stat: - path: "/etc/openshift/node/{{ item }}" + path: "{{ openshift.common.config_base }}/node/{{ item }}" with_items: - "system:node:{{ openshift.common.hostname }}.crt" - "system:node:{{ openshift.common.hostname }}.key" @@ -35,8 +35,8 @@ certs_missing: "{{ stat_result.results | map(attribute='stat.exists') | list | intersect([false])}}" node_subdir: node-{{ openshift.common.hostname }} - config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }} - node_cert_dir: /etc/openshift/node + config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}" + node_cert_dir: "{{ openshift.common.config_base }}/node" - name: Create temp directory for syncing certs hosts: localhost @@ -89,9 +89,9 @@ path: "{{ node_cert_dir }}" state: directory - # TODO: notify restart openshift-node + # TODO: notify restart node # possibly test service started time 
against certificate/config file - # timestamps in openshift-node to trigger notify + # timestamps in node to trigger notify - name: Unarchive the tarball on the node unarchive: src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz" diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml index f76df089f..5cf83e186 100644 --- a/playbooks/common/openshift-node/service.yml +++ b/playbooks/common/openshift-node/service.yml @@ -10,9 +10,9 @@ add_host: name={{ item }} groups=g_service_nodes with_items: oo_host_group_exp | default([]) -- name: Change openshift-node state on node instance(s) +- name: Change state on node instance(s) hosts: g_service_nodes connection: ssh gather_facts: no tasks: - - service: name=openshift-node state="{{ new_cluster_state }}" + - service: name={{ service_type }}-node state="{{ new_cluster_state }}" diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 09cc4aaf7..928209f0f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Set common OpenShift facts +- name: Set common Cluster facts openshift_facts: role: common local_facts: diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml index 8e7d71154..817fe0a5f 100644 --- a/roles/openshift_common/vars/main.yml +++ b/roles/openshift_common/vars/main.yml @@ -6,4 +6,4 @@ # interfaces) os_firewall_use_firewalld: False -openshift_data_dir: /var/lib/openshift +openshift_data_dir: /var/lib/origin diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index c1c4e1b5c..2c007554e 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -6,7 +6,7 @@ DOCUMENTATION = ''' --- module: openshift_facts -short_description: OpenShift Facts +short_description: Cluster Facts author: Jason DeTiberus requirements: [ ] ''' @@ -283,28 +283,6 @@ def normalize_provider_facts(provider, metadata): facts = normalize_openstack_facts(metadata, facts) return facts -def set_registry_url_if_unset(facts): - """ Set registry_url fact if not already present in facts dict - - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the generated identity providers - facts if they were not already present - """ - for role in ('master', 'node'): - if role in facts: - deployment_type = facts['common']['deployment_type'] - if 'registry_url' not in facts[role]: - registry_url = "openshift/origin-${component}:${version}" - if deployment_type == 'enterprise': - registry_url = "openshift3/ose-${component}:${version}" - elif deployment_type == 'online': - registry_url = ("openshift3/ose-${component}:${version}") - facts[role]['registry_url'] = registry_url - - return facts - def set_fluentd_facts_if_unset(facts): """ Set fluentd facts if not already present in facts dict dict: the facts dict updated with the generated fluentd facts if @@ -448,6 +426,48 @@ def set_aggregate_facts(facts): return facts +def set_deployment_facts_if_unset(facts): + """ Set Facts that vary based on deployment_type. 
This currently + includes common.service_type, common.config_base, master.registry_url, + node.registry_url + + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the generated deployment_type + facts + """ + if 'common' in facts: + deployment_type = facts['common']['deployment_type'] + if 'service_type' not in facts['common']: + service_type = 'atomic-openshift' + if deployment_type == 'origin': + service_type = 'openshift' + elif deployment_type in ['enterprise', 'online']: + service_type = 'openshift' + facts['common']['service_type'] = service_type + if 'config_base' not in facts['common']: + config_base = '/etc/origin' + if deployment_type in ['enterprise', 'online']: + config_base = '/etc/openshift' + elif deployment_type == 'origin': + config_base = '/etc/openshift' + facts['common']['config_base'] = config_base + + for role in ('master', 'node'): + if role in facts: + deployment_type = facts['common']['deployment_type'] + if 'registry_url' not in facts[role]: + registry_url = 'aos3/aos-${component}:${version}' + if deployment_type in ['enterprise', 'online']: + registry_url = 'openshift3/ose-${component}:${version}' + elif deployment_type == 'origin': + registry_url = 'openshift/origin-${component}:${version}' + facts[role]['registry_url'] = registry_url + + return facts + + def set_sdn_facts_if_unset(facts): """ Set sdn facts if not already present in facts dict @@ -510,7 +530,7 @@ def get_current_config(facts): # anything from working properly as far as I can tell, perhaps because # we override the kubeconfig path everywhere we use it? # Query kubeconfig settings - kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' + kubeconfig_dir = '/var/lib/origin/openshift.local.certificates' if role == 'node': kubeconfig_dir = os.path.join( kubeconfig_dir, "node-%s" % facts['common']['hostname'] @@ -657,25 +677,25 @@ def get_local_facts_from_file(filename): class OpenShiftFactsUnsupportedRoleError(Exception): - """OpenShift Facts Unsupported Role Error""" + """Origin Facts Unsupported Role Error""" pass class OpenShiftFactsFileWriteError(Exception): - """OpenShift Facts File Write Error""" + """Origin Facts File Write Error""" pass class OpenShiftFactsMetadataUnavailableError(Exception): - """OpenShift Facts Metadata Unavailable Error""" + """Origin Facts Metadata Unavailable Error""" pass class OpenShiftFacts(object): - """ OpenShift Facts + """ Origin Facts Attributes: - facts (dict): OpenShift facts for the host + facts (dict): facts for the host Args: role (str): role for setting local facts @@ -720,8 +740,8 @@ class OpenShiftFacts(object): facts = set_fluentd_facts_if_unset(facts) facts = set_cluster_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) - facts = set_registry_url_if_unset(facts) facts = set_sdn_facts_if_unset(facts) + facts = set_deployment_facts_if_unset(facts) facts = set_aggregate_facts(facts) return dict(openshift=facts) diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index b2cda3a85..fd3d20800 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -6,5 +6,5 @@ - ansible_version | version_compare('1.9.0', 'ne') - ansible_version | version_compare('1.9.0.1', 'ne') -- name: Gather OpenShift facts +- name: Gather Cluster facts openshift_facts: diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 0e7ef3aab..155bdb58b 100644 --- a/roles/openshift_master/README.md +++ 
b/roles/openshift_master/README.md @@ -1,7 +1,7 @@ -OpenShift Master -================ +OpenShift/Atomic Enterprise Master +================================== -OpenShift Master service installation +Master service installation Requirements ------------ @@ -15,8 +15,8 @@ Role Variables From this role: | Name | Default value | | |-------------------------------------|-----------------------|--------------------------------------------------| -| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | -| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up | +| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master | +| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up | | oreg_url | UNDEF | Default docker registry to use | | openshift_master_api_port | UNDEF | | | openshift_master_console_port | UNDEF | | diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index ca8860099..9766d01ae 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -5,11 +5,11 @@ openshift_node_ips: [] os_firewall_allow: - service: etcd embedded port: 4001/tcp -- service: OpenShift api https +- service: api server https port: 8443/tcp -- service: OpenShift dns tcp +- service: dns tcp port: 53/tcp -- service: OpenShift dns udp +- service: dns udp port: 53/udp - service: Fluentd td-agent tcp port: 24224/tcp @@ -22,9 +22,9 @@ os_firewall_allow: - service: Corosync UDP port: 5405/udp os_firewall_deny: -- service: OpenShift api http +- service: api server http port: 8080/tcp -- service: former OpenShift web console port +- service: former web console port port: 8444/tcp - service: former etcd peer port port: 7001/tcp diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index f1e7e1ab3..2981979e0 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,4 +1,4 @@ --- -- name: restart openshift-master - service: name=openshift-master state=restarted +- name: restart master + service: name={{ openshift.common.service_type }}-master state=restarted when: not openshift_master_ha | bool diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index 41a183c3b..c125cb5d0 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: author: Jhon Honce - description: OpenShift Master + description: Master company: Red Hat, Inc. 
license: Apache License, Version 2.0 min_ansible_version: 1.7 diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 9204d25ce..4dad9b62f 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -12,11 +12,7 @@ msg: "openshift_master_cluster_password must be set for multi-master installations" when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined -- name: Install OpenShift Master package - yum: pkg=openshift-master state=present - register: install_result - -- name: Set master OpenShift facts +- name: Set master facts openshift_facts: role: master local_facts: @@ -59,8 +55,26 @@ api_server_args: "{{ osm_api_server_args | default(None) }}" controller_args: "{{ osm_controller_args | default(None) }}" +- name: Install Master package + yum: pkg={{ openshift.common.service_type }}-master state=present + register: install_result + +- name: Check for RPM generated config marker file /etc/origin/.config_managed + stat: path=/etc/origin/.rpmgenerated + register: rpmgenerated_config + +- name: Remove RPM generated config files + file: + path: "{{ item }}" + state: absent + when: openshift.common.service_type in ['atomic-enterprise','openshift-enterprise'] and rpmgenerated_config.stat.exists == true + with_items: + - "{{ openshift.common.config_base }}/master" + - "{{ openshift.common.config_base }}/node" + - "{{ openshift.common.config_base }}/.rpmgenerated" + # TODO: These values need to be configurable -- name: Set dns OpenShift facts +- name: Set dns facts openshift_facts: role: dns local_facts: @@ -80,20 +94,27 @@ args: creates: "{{ openshift_master_policy }}" notify: - - restart openshift-master + - restart master - name: Create the scheduler config template: dest: "{{ openshift_master_scheduler_conf }}" src: scheduler.json.j2 notify: - - restart openshift-master + - restart master - name: Install httpd-tools if needed yum: pkg=httpd-tools state=present when: item.kind == 'HTPasswdPasswordIdentityProvider' with_items: openshift.master.identity_providers +- name: Ensure htpasswd directory exists + file: + path: "{{ item.filename | dirname }}" + state: directory + when: item.kind == 'HTPasswdPasswordIdentityProvider' + with_items: openshift.master.identity_providers + - name: Create the htpasswd file if needed copy: dest: "{{ item.filename }}" @@ -109,11 +130,11 @@ dest: "{{ openshift_master_config_file }}" src: master.yaml.v1.j2 notify: - - restart openshift-master + - restart master -- name: Configure OpenShift settings +- name: Configure master settings lineinfile: - dest: /etc/sysconfig/openshift-master + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master regexp: "{{ item.regex }}" line: "{{ item.line }}" with_items: @@ -122,10 +143,10 @@ - regex: '^CONFIG_FILE=' line: "CONFIG_FILE={{ openshift_master_config_file }}" notify: - - restart openshift-master + - restart master -- name: Start and enable openshift-master - service: name=openshift-master enabled=yes state=started +- name: Start and enable master + service: name={{ openshift.common.service_type }}-master enabled=yes state=started when: not openshift_master_ha | bool register: start_result @@ -146,7 +167,7 @@ shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster when: install_result | changed -- name: Create the OpenShift client config dir(s) +- name: Create the client config dir(s) file: path: "~{{ item }}/.kube" state: 
directory @@ -159,7 +180,7 @@ # TODO: Update this file if the contents of the source file are not present in # the dest file, will need to make sure to ignore things that could be added -- name: Copy the OpenShift admin client config(s) +- name: Copy the admin client config(s) command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.kube/config args: creates: ~{{ item }}/.kube/config @@ -167,7 +188,7 @@ - root - "{{ ansible_ssh_user }}" -- name: Update the permissions on the OpenShift admin client config(s) +- name: Update the permissions on the admin client config(s) file: path: "~{{ item }}/.kube/config" state: file diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index f6f69966a..a61ba0397 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -1,5 +1,5 @@ --- -openshift_master_config_dir: /etc/openshift/master +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml" openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json" openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index 03eb7e15f..303dc9c5d 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: Install the OpenShift package for admin tooling - yum: pkg=openshift state=present +- name: Install the base package for admin tooling + yum: pkg={{ openshift.common.service_type }} state=present register: install_result - name: Reload generated facts diff --git a/roles/openshift_master_ca/vars/main.yml b/roles/openshift_master_ca/vars/main.yml index 2925680bb..1f6af808c 100644 --- a/roles/openshift_master_ca/vars/main.yml +++ b/roles/openshift_master_ca/vars/main.yml @@ -1,5 +1,5 @@ --- -openshift_master_config_dir: /etc/openshift/master +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml index 6214f7918..3f18ddc79 100644 --- a/roles/openshift_master_certificates/vars/main.yml +++ b/roles/openshift_master_certificates/vars/main.yml @@ -1,3 +1,3 @@ --- -openshift_generated_configs_dir: /etc/openshift/generated-configs -openshift_master_config_dir: /etc/openshift/master +openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs" +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml index 8ddc8bfda..7ab9afb51 100644 --- a/roles/openshift_master_cluster/tasks/configure.yml +++ b/roles/openshift_master_cluster/tasks/configure.yml @@ -22,14 +22,14 @@ command: pcs resource defaults resource-stickiness=100 - name: Add the cluster VIP resource - command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group openshift-master + command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master - name: Add the cluster public 
VIP resource - command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group openshift-master + command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master when: openshift_master_cluster_public_vip != openshift_master_cluster_vip -- name: Add the cluster openshift-master service resource - command: pcs resource create master systemd:openshift-master op start timeout=90s stop timeout=90s --group openshift-master +- name: Add the cluster master service resource + command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master - name: Disable stonith command: pcs property set stonith-enabled=false diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml index a80b6c5b4..3b416005b 100644 --- a/roles/openshift_master_cluster/tasks/configure_deferred.yml +++ b/roles/openshift_master_cluster/tasks/configure_deferred.yml @@ -1,8 +1,8 @@ --- - debug: msg="Deferring config" -- name: Start and enable openshift-master +- name: Start and enable the master service: - name: openshift-master + name: "{{ openshift.common.service_type }}-master" state: started enabled: yes diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 427269931..3aff81274 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -1,12 +1,12 @@ -OpenShift Node -============== +OpenShift/Atomic Enterprise Node +================================ -OpenShift Node service installation +Node service installation Requirements ------------ -One or more OpenShift Master servers. +One or more Master servers. A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos. @@ -14,10 +14,10 @@ rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos. 
Role Variables -------------- From this role: -| Name | Default value | | -|------------------------------------------|-----------------------|----------------------------------------| -| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node | -| oreg_url | UNDEF (Optional) | Default docker registry to use | +| Name | Default value | | +|------------------------------------------|-----------------------|--------------------------------------------------------| +| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node | +| oreg_url | UNDEF (Optional) | Default docker registry to use | From openshift_common: | Name | Default Value | | diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 1dbcc4301..c4abf9d7c 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,6 @@ --- os_firewall_allow: -- service: OpenShift kubelet +- service: Kubernetes kubelet port: 10250/tcp - service: http port: 80/tcp diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 8b5acefbf..633f3ed13 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,6 +1,6 @@ --- -- name: restart openshift-node - service: name=openshift-node state=restarted +- name: restart node + service: name={{ openshift.common.service_type }}-node state=restarted - name: restart docker service: name=docker state=restarted diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 18f0ce064..5ccb810cf 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -10,16 +10,7 @@ msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." 
when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online'] -- name: Install OpenShift Node package - yum: pkg=openshift-node state=present - register: node_install_result - -- name: Install openshift-sdn-ovs - yum: pkg=openshift-sdn-ovs state=present - register: sdn_install_result - when: openshift.common.use_openshift_sdn - -- name: Set node OpenShift facts +- name: Set node facts openshift_facts: role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" @@ -38,17 +29,26 @@ portal_net: "{{ openshift_master_portal_net | default(None) }}" kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" +- name: Install Node package + yum: pkg={{ openshift.common.service_type }}-node state=present + register: node_install_result + +- name: Install sdn-ovs package + yum: pkg={{ openshift.common.service_type }}-sdn-ovs state=present + register: sdn_install_result + when: openshift.common.use_openshift_sdn + # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config template: dest: "{{ openshift_node_config_file }}" src: node.yaml.v1.j2 notify: - - restart openshift-node + - restart node -- name: Configure OpenShift Node settings +- name: Configure Node settings lineinfile: - dest: /etc/sysconfig/openshift-node + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node regexp: "{{ item.regex }}" line: "{{ item.line }}" with_items: @@ -57,13 +57,13 @@ - regex: '^CONFIG_FILE=' line: "CONFIG_FILE={{ openshift_node_config_file }}" notify: - - restart openshift-node + - restart node - stat: path=/etc/sysconfig/docker register: docker_check # TODO: Enable secure registry when code available in origin -- name: Secure OpenShift Registry +- name: Secure Registry lineinfile: dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*' @@ -117,8 +117,8 @@ seboolean: name=virt_use_nfs state=yes persistent=yes when: ansible_selinux and ansible_selinux.status == "enabled" -- name: Start and enable openshift-node - service: name=openshift-node enabled=yes state=started +- name: Start and enable node + service: name={{ openshift.common.service_type }}-node enabled=yes state=started register: start_result - name: pause to prevent service restart from interfering with bootstrapping diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml index cf47f8354..77a9694de 100644 --- a/roles/openshift_node/vars/main.yml +++ b/roles/openshift_node/vars/main.yml @@ -1,3 +1,3 @@ --- -openshift_node_config_dir: /etc/openshift/node +openshift_node_config_dir: "{{ openshift.common.config_base }}/node" openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml" diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md index c6304e4b0..6264d253a 100644 --- a/roles/openshift_node_certificates/README.md +++ b/roles/openshift_node_certificates/README.md @@ -1,5 +1,5 @@ -OpenShift Node Certificates -======================== +OpenShift/Atomic Enterprise Node Certificates +============================================= TODO diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml index a018bb0f9..61fbb1e51 100644 --- a/roles/openshift_node_certificates/vars/main.yml +++ b/roles/openshift_node_certificates/vars/main.yml @@ -1,7 +1,7 @@ --- -openshift_node_config_dir: /etc/openshift/node -openshift_master_config_dir: /etc/openshift/master -openshift_generated_configs_dir: 
/etc/openshift/generated-configs +openshift_node_config_dir: "{{ openshift.common.config_base }}/node" +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs" openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt" openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key" openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" diff --git a/roles/openshift_registry/vars/main.yml b/roles/openshift_registry/vars/main.yml index 9fb501e85..9967e26f4 100644 --- a/roles/openshift_registry/vars/main.yml +++ b/roles/openshift_registry/vars/main.yml @@ -1,3 +1,2 @@ --- -openshift_master_config_dir: /etc/openshift/master - +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_repos/vars/main.yml index bbb4c77e7..319611a0b 100644 --- a/roles/openshift_repos/vars/main.yml +++ b/roles/openshift_repos/vars/main.yml @@ -1,2 +1,7 @@ --- -known_openshift_deployment_types: ['origin', 'online', 'enterprise'] +# origin uses community packages named 'origin' +# online currently uses 'openshift' packages +# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift' +# atomic-enterprise uses Red Hat packages named 'atomic-openshift' +# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1 +known_openshift_deployment_types: ['origin', 'online', 'enterprise','atomic-enterprise','openshift-enterprise'] diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml index 9fb501e85..9967e26f4 100644 --- a/roles/openshift_router/vars/main.yml +++ b/roles/openshift_router/vars/main.yml @@ -1,3 +1,2 @@ --- -openshift_master_config_dir: /etc/openshift/master - +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml index e9f5814bb..ead81b876 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/main.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml @@ -21,4 +21,4 @@ template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d -# TODO - Get the json files to an openshift-master, and load them. \ No newline at end of file +# TODO - Get the json files to a master, and load them. 
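The pattern this commit introduces (deriving package names, systemd unit names, and config paths from openshift.common facts instead of hard-coding "openshift") can be sketched in isolation. The following is a minimal, hypothetical playbook, not part of the repository: the openshift.common values are hard-coded here purely for illustration, whereas in the real roles they are computed by the openshift_facts module from the chosen deployment type, and the "nodes" host group and /etc/openshift config_base are assumed example values.

    - hosts: nodes            # hypothetical inventory group for this sketch
      vars:
        # Normally set by openshift_facts based on deployment_type, e.g.
        # 'openshift' for origin/online/enterprise, 'atomic-openshift'
        # for atomic-enterprise / openshift-enterprise.
        openshift:
          common:
            service_type: atomic-openshift
            config_base: /etc/openshift   # assumed value; varies by deployment type
      tasks:
        - name: Install Node package
          yum: pkg={{ openshift.common.service_type }}-node state=present

        - name: Start and enable node
          service: name={{ openshift.common.service_type }}-node state=started enabled=yes

        - name: Show the node config file location
          debug: msg="{{ openshift.common.config_base }}/node/node-config.yaml"

With service_type set to atomic-openshift this installs and starts atomic-openshift-node; with openshift it resolves to the original openshift-node names, which is what lets the same roles and handlers cover both product lines without duplicated tasks.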
-- cgit v1.2.3 From 3012985b20e44c0ca4f7cce5a70926f518ec19c5 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Fri, 21 Aug 2015 17:44:30 -0400 Subject: Updates for zbx ans module --- filter_plugins/oo_zabbix_filters.py | 29 +++ playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 51 ----- playbooks/adhoc/zabbix_setup/create_template.yml | 57 ----- playbooks/adhoc/zabbix_setup/create_user.yml | 31 --- playbooks/adhoc/zabbix_setup/filter_plugins | 1 - playbooks/adhoc/zabbix_setup/roles | 1 - playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 38 ---- .../adhoc/zabbix_setup/vars/template_heartbeat.yml | 11 - .../adhoc/zabbix_setup/vars/template_host.yml | 27 --- .../adhoc/zabbix_setup/vars/template_master.yml | 27 --- .../adhoc/zabbix_setup/vars/template_node.yml | 27 --- .../adhoc/zabbix_setup/vars/template_os_linux.yml | 90 -------- .../adhoc/zabbix_setup/vars/template_router.yml | 27 --- roles/lib_zabbix/README.md | 38 ++++ roles/lib_zabbix/library/__init__.py | 0 roles/lib_zabbix/library/test.yml | 131 +++++++++++ roles/lib_zabbix/library/zbx_application.py | 139 ++++++++++++ roles/lib_zabbix/library/zbx_discoveryrule.py | 177 +++++++++++++++ roles/lib_zabbix/library/zbx_host.py | 163 ++++++++++++++ roles/lib_zabbix/library/zbx_hostgroup.py | 116 ++++++++++ roles/lib_zabbix/library/zbx_item.py | 173 +++++++++++++++ roles/lib_zabbix/library/zbx_itemprototype.py | 241 +++++++++++++++++++++ roles/lib_zabbix/library/zbx_mediatype.py | 168 ++++++++++++++ roles/lib_zabbix/library/zbx_template.py | 133 ++++++++++++ roles/lib_zabbix/library/zbx_trigger.py | 174 +++++++++++++++ roles/lib_zabbix/library/zbx_user.py | 174 +++++++++++++++ roles/lib_zabbix/library/zbx_user_media.py | 240 ++++++++++++++++++++ roles/lib_zabbix/library/zbx_usergroup.py | 208 ++++++++++++++++++ roles/os_zabbix/README.md | 40 ++++ roles/os_zabbix/defaults/main.yml | 1 + roles/os_zabbix/handlers/main.yml | 1 + roles/os_zabbix/library/__init__.py | 0 roles/os_zabbix/library/get_drule.yml | 115 ---------- roles/os_zabbix/library/test.yml | 131 ----------- roles/os_zabbix/library/zbx_application.py | 135 ------------ roles/os_zabbix/library/zbx_discoveryrule.py | 177 --------------- roles/os_zabbix/library/zbx_host.py | 163 -------------- roles/os_zabbix/library/zbx_hostgroup.py | 116 ---------- roles/os_zabbix/library/zbx_item.py | 170 --------------- roles/os_zabbix/library/zbx_itemprototype.py | 241 --------------------- roles/os_zabbix/library/zbx_mediatype.py | 149 ------------- roles/os_zabbix/library/zbx_template.py | 127 ----------- roles/os_zabbix/library/zbx_trigger.py | 175 --------------- roles/os_zabbix/library/zbx_user.py | 169 --------------- roles/os_zabbix/library/zbx_usergroup.py | 160 -------------- roles/os_zabbix/meta/main.yml | 9 + roles/os_zabbix/tasks/clean_zabbix.yml | 47 ++++ roles/os_zabbix/tasks/create_template.yml | 56 +++++ roles/os_zabbix/tasks/create_user.yml | 11 + roles/os_zabbix/tasks/main.yml | 30 +++ roles/os_zabbix/vars/main.yml | 1 + roles/os_zabbix/vars/template_heartbeat.yml | 13 ++ roles/os_zabbix/vars/template_host.yml | 27 +++ roles/os_zabbix/vars/template_master.yml | 27 +++ roles/os_zabbix/vars/template_node.yml | 27 +++ roles/os_zabbix/vars/template_os_linux.yml | 143 ++++++++++++ roles/os_zabbix/vars/template_router.yml | 27 +++ 57 files changed, 2764 insertions(+), 2416 deletions(-) delete mode 100644 playbooks/adhoc/zabbix_setup/clean_zabbix.yml delete mode 100644 playbooks/adhoc/zabbix_setup/create_template.yml delete mode 100644 playbooks/adhoc/zabbix_setup/create_user.yml delete 
mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins delete mode 120000 playbooks/adhoc/zabbix_setup/roles delete mode 100644 playbooks/adhoc/zabbix_setup/setup_zabbix.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_host.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_master.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_node.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_router.yml create mode 100644 roles/lib_zabbix/README.md create mode 100644 roles/lib_zabbix/library/__init__.py create mode 100644 roles/lib_zabbix/library/test.yml create mode 100644 roles/lib_zabbix/library/zbx_application.py create mode 100644 roles/lib_zabbix/library/zbx_discoveryrule.py create mode 100644 roles/lib_zabbix/library/zbx_host.py create mode 100644 roles/lib_zabbix/library/zbx_hostgroup.py create mode 100644 roles/lib_zabbix/library/zbx_item.py create mode 100644 roles/lib_zabbix/library/zbx_itemprototype.py create mode 100644 roles/lib_zabbix/library/zbx_mediatype.py create mode 100644 roles/lib_zabbix/library/zbx_template.py create mode 100644 roles/lib_zabbix/library/zbx_trigger.py create mode 100644 roles/lib_zabbix/library/zbx_user.py create mode 100644 roles/lib_zabbix/library/zbx_user_media.py create mode 100644 roles/lib_zabbix/library/zbx_usergroup.py create mode 100644 roles/os_zabbix/README.md create mode 100644 roles/os_zabbix/defaults/main.yml create mode 100644 roles/os_zabbix/handlers/main.yml delete mode 100644 roles/os_zabbix/library/__init__.py delete mode 100644 roles/os_zabbix/library/get_drule.yml delete mode 100644 roles/os_zabbix/library/test.yml delete mode 100644 roles/os_zabbix/library/zbx_application.py delete mode 100644 roles/os_zabbix/library/zbx_discoveryrule.py delete mode 100644 roles/os_zabbix/library/zbx_host.py delete mode 100644 roles/os_zabbix/library/zbx_hostgroup.py delete mode 100644 roles/os_zabbix/library/zbx_item.py delete mode 100644 roles/os_zabbix/library/zbx_itemprototype.py delete mode 100644 roles/os_zabbix/library/zbx_mediatype.py delete mode 100644 roles/os_zabbix/library/zbx_template.py delete mode 100644 roles/os_zabbix/library/zbx_trigger.py delete mode 100644 roles/os_zabbix/library/zbx_user.py delete mode 100644 roles/os_zabbix/library/zbx_usergroup.py create mode 100644 roles/os_zabbix/meta/main.yml create mode 100644 roles/os_zabbix/tasks/clean_zabbix.yml create mode 100644 roles/os_zabbix/tasks/create_template.yml create mode 100644 roles/os_zabbix/tasks/create_user.yml create mode 100644 roles/os_zabbix/tasks/main.yml create mode 100644 roles/os_zabbix/vars/main.yml create mode 100644 roles/os_zabbix/vars/template_heartbeat.yml create mode 100644 roles/os_zabbix/vars/template_host.yml create mode 100644 roles/os_zabbix/vars/template_master.yml create mode 100644 roles/os_zabbix/vars/template_node.yml create mode 100644 roles/os_zabbix/vars/template_os_linux.yml create mode 100644 roles/os_zabbix/vars/template_router.yml (limited to 'playbooks') diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py index a473993a2..c44b874e8 100644 --- a/filter_plugins/oo_zabbix_filters.py +++ b/filter_plugins/oo_zabbix_filters.py @@ -59,6 +59,17 @@ class FilterModule(object): return data[zabbix_item]['params'] return None + @staticmethod + def oo_build_zabbix_collect(data, string, value): + ''' Build a 
list of dicts from a list of data matched on string attribute + ''' + rval = [] + for item in data: + if item[string] == value: + rval.append(item) + + return rval + @staticmethod def oo_build_zabbix_list_dict(values, string): ''' Build a list of dicts with string as key for each value @@ -68,6 +79,22 @@ class FilterModule(object): rval.append({string: value}) return rval + @staticmethod + def oo_remove_attr_from_list_dict(data, attr): + ''' Remove a specific attribute from a dict + ''' + attrs = [] + if isinstance(attr, str): + attrs.append(attr) + else: + attrs = attr + + for attribute in attrs: + for _entry in data: + _entry.pop(attribute, None) + + return data + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -76,4 +103,6 @@ class FilterModule(object): "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid, "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, "create_data": self.create_data, + "oo_build_zabbix_collect": self.oo_build_zabbix_collect, + "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict, } diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml deleted file mode 100644 index a31cbef65..000000000 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix - roles: - - ../../../roles/os_zabbix - post_tasks: - - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - name: 'Template Heartbeat' - register: templ_heartbeat - - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - name: 'Template App Zabbix Server' - register: templ_zabbix_server - - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - name: 'Template App Zabbix Agent' - register: templ_zabbix_agent - - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: templates - - - debug: var=templ_heartbeat.results - - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: absent - with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" - when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml deleted file mode 100644 index 50fff53b2..000000000 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- debug: var=ctp_template - -- name: Create Template - zbx_template: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - name: "{{ ctp_template.name }}" - register: ctp_created_template - -- debug: var=ctp_created_template - -#- name: Create Application -# zbxapi: -# server: "{{ ctp_zserver }}" -# user: "{{ ctp_zuser }}" -# password: "{{ ctp_zpassword }}" -# zbx_class: Application -# state: present -# params: -# name: "{{ ctp_template.application.name}}" -# hostid: "{{ ctp_created_template.results[0].templateid }}" -# search: -# name: "{{ ctp_template.application.name}}" -# register: ctp_created_application - -#- debug: var=ctp_created_application - -- name: Create Items - 
zbx_item: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - key: "{{ item.key }}" - name: "{{ item.name | default(item.key, true) }}" - value_type: "{{ item.value_type | default('int') }}" - template_name: "{{ ctp_template.name }}" - with_items: ctp_template.zitems - register: ctp_created_items - -#- debug: var=ctp_created_items - -- name: Create Triggers - zbx_trigger: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - description: "{{ item.description }}" - expression: "{{ item.expression }}" - priority: "{{ item.priority }}" - with_items: ctp_template.ztriggers - when: ctp_template.ztriggers is defined - -#- debug: var=ctp_created_triggers - - diff --git a/playbooks/adhoc/zabbix_setup/create_user.yml b/playbooks/adhoc/zabbix_setup/create_user.yml deleted file mode 100644 index dd74798b7..000000000 --- a/playbooks/adhoc/zabbix_setup/create_user.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# export PYTHONPATH='/usr/lib/python2.7/site-packages/:/home/kwoodson/git/openshift-tools' -# ansible-playbook -e 'cli_password=zabbix' -e 'cli_new_password=new-zabbix' create_user.yml -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: admin - g_zpassword: "{{ cli_password }}" - roles: - - ../../../roles/os_zabbix - post_tasks: - - zbx_user: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: users - - - debug: var=users - - - name: Update zabbix creds for admin - zbx_user: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - alias: Admin - passwd: "{{ cli_new_password | default(g_zpassword, true) }}" diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/adhoc/zabbix_setup/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/adhoc/zabbix_setup/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml deleted file mode 100644 index 1729194b5..000000000 --- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix - roles: - - ../../../roles/os_zabbix - post_tasks: - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: templates - - - debug: var=templates - - - name: Include Template - include: create_template.yml - vars: - ctp_template: "{{ g_template_heartbeat }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - - - name: Include Template - include: create_template.yml - vars: - ctp_template: "{{ g_template_os_linux }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml 
b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml deleted file mode 100644 index 22cc75554..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -g_template_heartbeat: - name: Template Heartbeat - zitems: - - name: Heartbeat Ping - hostid: - key: heartbeat.ping - ztriggers: - - description: 'Heartbeat.ping has failed on {HOST.NAME}' - expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' - priority: avg diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml deleted file mode 100644 index e7cc667cb..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_host.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_host: - params: - name: Template Host - host: Template Host - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Host - zitems: - - name: Host Ping - hostid: - key_: host.ping - type: 2 - value_type: 0 - output: extend - search: - key_: host.ping - ztriggers: - - description: 'Host ping has failed on {HOST.NAME}' - expression: '{Template Host:host.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Host ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml deleted file mode 100644 index 5f9b41a4f..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_master: - params: - name: Template Master - host: Template Master - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Master - zitems: - - name: Master Etcd Ping - hostid: - key_: master.etcd.ping - type: 2 - value_type: 0 - output: extend - search: - key_: master.etcd.ping - ztriggers: - - description: 'Master Etcd ping has failed on {HOST.NAME}' - expression: '{Template Master:master.etcd.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Master Etcd ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml deleted file mode 100644 index 98c343a24..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_node.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_node: - params: - name: Template Node - host: Template Node - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Node - zitems: - - name: Kubelet Ping - hostid: - key_: kubelet.ping - type: 2 - value_type: 0 - output: extend - search: - key_: kubelet.ping - ztriggers: - - description: 'Kubelet ping has failed on {HOST.NAME}' - expression: '{Template Node:kubelet.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Kubelet ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml deleted file mode 100644 index 9cc038ffa..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -g_template_os_linux: - name: Template OS Linux - zitems: - - key: kernel.uname.sysname - value_type: string - - - key: kernel.all.cpu.wait.total - value_type: int - - - key: kernel.all.cpu.irq.hard - value_type: int - - - key: kernel.all.cpu.idle - value_type: int - - - key: kernel.uname.distro - value_type: string - 
- - key: kernel.uname.nodename - value_type: string - - - key: kernel.all.cpu.irq.soft - value_type: int - - - key: kernel.all.load.15_minute - value_type: float - - - key: kernel.all.cpu.sys - value_type: int - - - key: kernel.all.load.5_minute - value_type: float - - - key: mem.freemem - value_type: int - - - key: kernel.all.cpu.nice - value_type: int - - - key: mem.util.bufmem - value_type: int - - - key: swap.used - value_type: int - - - key: kernel.all.load.1_minute - value_type: float - - - key: kernel.uname.version - value_type: string - - - key: swap.length - value_type: int - - - key: mem.physmem - value_type: int - - - key: kernel.all.uptime - value_type: int - - - key: swap.free - value_type: int - - - key: mem.util.used - value_type: int - - - key: kernel.all.cpu.user - value_type: int - - - key: kernel.uname.machine - value_type: string - - - key: hinv.ncpu - value_type: int - - - key: mem.util.cached - value_type: int - - - key: kernel.all.cpu.steal - value_type: int - - - key: kernel.all.pswitch - value_type: int - - - key: kernel.uname.release - value_type: string - - - key: proc.nprocs - value_type: int diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml deleted file mode 100644 index 4dae7da1e..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_router.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_router: - params: - name: Template Router - host: Template Router - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Router - zitems: - - name: Router Backends down - hostid: - key_: router.backends.down - type: 2 - value_type: 0 - output: extend - search: - key_: router.backends.down - ztriggers: - - description: 'Number of router backends down on {HOST.NAME}' - expression: '{Template Router:router.backends.down.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Number of router backends down on {HOST.NAME}' - expandExpression: True diff --git a/roles/lib_zabbix/README.md b/roles/lib_zabbix/README.md new file mode 100644 index 000000000..69debc698 --- /dev/null +++ b/roles/lib_zabbix/README.md @@ -0,0 +1,38 @@ +zabbix +========= + +Automate zabbix tasks. + +Requirements +------------ + +This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. + +Role Variables +-------------- + +None + +Dependencies +------------ + +This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. + +Example Playbook +---------------- + + - zbx_host: + server: zab_server + user: zab_user + password: zab_password + name: 'myhost' + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift operations, Red Hat, Inc diff --git a/roles/lib_zabbix/library/__init__.py b/roles/lib_zabbix/library/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/roles/lib_zabbix/library/test.yml b/roles/lib_zabbix/library/test.yml new file mode 100644 index 000000000..cedace1a0 --- /dev/null +++ b/roles/lib_zabbix/library/test.yml @@ -0,0 +1,131 @@ +--- +# This is a test playbook to create one of each of the zabbix ansible modules. 
+# ensure that the zbxapi module is installed +# ansible-playbook test.yml +- name: Test zabbix ansible module + hosts: localhost + gather_facts: no + vars: + zbx_server: http://localhost:8080/zabbix/api_jsonrpc.php + zbx_user: Admin + zbx_password: zabbix + + pre_tasks: + - name: Create a template + zbx_template: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'test template' + register: template_output + + - debug: var=template_output + + - name: Create a discoveryrule + zbx_discoveryrule: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: test discoverule + key: test_listener + template_name: test template + lifetime: 14 + register: discoveryrule + + - debug: var=discoveryrule + + - name: Create an itemprototype + zbx_itemprototype: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'Test itemprototype on {#TEST_LISTENER}' + key: 'test[{#TEST_LISTENER}]' + template_name: test template + discoveryrule_name: test discoverule + register: itemproto + + - debug: var=itemproto + + - name: Create an application + zbx_application: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'Test App' + template_name: "test template" + register: item_output + + - name: Create an item + zbx_item: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'test item' + key: 'kenny.item.1' + applications: + - 'Test App' + template_name: "test template" + register: item_output + + - debug: var=item_output + + - name: Create an trigger + zbx_trigger: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + expression: '{test template:kenny.item.1.last()}>2' + description: 'Kenny desc' + register: trigger_output + + - debug: var=trigger_output + + - name: Create a hostgroup + zbx_hostgroup: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'kenny hostgroup' + register: hostgroup_output + + - debug: var=hostgroup_output + + - name: Create a host + zbx_host: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'kenny host' + template_names: + - test template + hostgroup_names: + - kenny hostgroup + register: host_output + + - debug: var=host_output + + - name: Create a usergroup + zbx_usergroup: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: kenny usergroup + rights: + - 'kenny hostgroup': rw + register: usergroup_output + + - debug: var=usergroup_output + + - name: Create a user + zbx_user: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + alias: kenny user + passwd: zabbix + usergroups: + - kenny usergroup + register: user_output + + - debug: var=user_output diff --git a/roles/lib_zabbix/library/zbx_application.py b/roles/lib_zabbix/library/zbx_application.py new file mode 100644 index 000000000..01df1a98e --- /dev/null +++ b/roles/lib_zabbix/library/zbx_application.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python +''' +Ansible module for application +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix application ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append(content['result'][0]['templateid']) + return template_ids + +def main(): + ''' Ansible module for application + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + template_name=dict(default=None, type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], + module.params['user'], + module.params['password'], + module.params['debug'])) + + #Set the instance and the application for the rest of the calls + zbx_class_name = 'application' + idname = 'applicationid' + aname = module.params['name'] + state = module.params['state'] + # get a applicationid, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': aname}, + 'selectHost': 'hostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0], + 'name': aname, + } + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=content['result'], state="present") + + # We have 
differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_discoveryrule.py b/roles/lib_zabbix/library/zbx_discoveryrule.py new file mode 100644 index 000000000..56b87fecc --- /dev/null +++ b/roles/lib_zabbix/library/zbx_discoveryrule.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +''' +Zabbix discovery rule ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_template(zapi, template_name): + '''get a template by name + ''' + content = zapi.get_content('template', + 'get', + {'search': {'host': template_name}, + 'output': 'extend', + 'selectInterfaces': 'interfaceid', + }) + if not content['result']: + return None + return content['result'][0] + +def get_type(vtype): + ''' + Determine which type of discoverrule this is + ''' + _types = {'agent': 0, + 'SNMPv1': 1, + 'trapper': 2, + 'simple': 3, + 'SNMPv2': 4, + 'internal': 5, + 'SNMPv3': 6, + 'active': 7, + 'external': 10, + 'database monitor': 11, + 'ipmi': 12, + 'ssh': 13, + 'telnet': 14, + 'JMX': 16, + } + + for typ in _types.keys(): + if vtype in typ or vtype == typ: + _vtype = _types[typ] + break + else: + _vtype = 2 + + return _vtype + +def main(): + ''' + Ansible module for zabbix discovery rules + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + interfaceid=dict(default=None, type='int'), + ztype=dict(default='trapper', type='str'), + delay=dict(default=60, type='int'), + lifetime=dict(default=30, type='int'), + template_name=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + 
state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params['user'] + passwd = module.params['password'] + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'discoveryrule' + idname = "itemid" + dname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': dname}, + #'selectDServices': 'extend', + #'selectDChecks': 'extend', + #'selectDhosts': 'dhostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + template = get_template(zapi, module.params['template_name']) + params = {'name': dname, + 'key_': module.params['key'], + 'hostid': template['templateid'], + 'interfaceid': module.params['interfaceid'], + 'lifetime': module.params['lifetime'], + 'type': get_type(module.params['ztype']), + } + if params['type'] in [2, 5, 7, 11]: + params.pop('interfaceid') + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py new file mode 100644 index 000000000..12c5f3456 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_host.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +''' +Zabbix host ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. 
+# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_group_ids(zapi, hostgroup_names): + ''' + get hostgroups + ''' + # Fetch groups by name + group_ids = [] + for hgr in hostgroup_names: + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) + if content.has_key('result'): + group_ids.append({'groupid': content['result'][0]['groupid']}) + + return group_ids + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append({'templateid': content['result'][0]['templateid']}) + return template_ids + +def main(): + ''' + Ansible module for zabbix host + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + hostgroup_names=dict(default=[], type='list'), + template_names=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + interfaces=dict(default=None, type='list'), + ), + #supports_check_mode=True + ) + + user = module.params['user'] + passwd = module.params['password'] + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'host' + idname = "hostid" + hname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': hname}, + 'selectGroups': 'groupid', + 'selectParentTemplates': 'templateid', + 'selectInterfaces': 'interfaceid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + ifs = module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent + 'main': 1, # default interface? 1 = true + 'useip': 1, # default interface? 1 = true + 'ip': '127.0.0.1', # default interface? 1 = true + 'dns': '', # dns for host + 'port': '10050', # port for interface? 
10050 + }] + params = {'host': hname, + 'groups': get_group_ids(zapi, module.params['hostgroup_names']), + 'templates': get_template_ids(zapi, module.params['template_names']), + 'interfaces': ifs, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_hostgroup.py b/roles/lib_zabbix/library/zbx_hostgroup.py new file mode 100644 index 000000000..a1eb875d4 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_hostgroup.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +''' Ansible module for hostgroup +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix hostgroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' ansible module for hostgroup + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'hostgroup' + idname = "groupid" + hname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': hname}, + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': hname} + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py new file mode 100644 index 000000000..64dbb976f --- /dev/null +++ b/roles/lib_zabbix/library/zbx_item.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +''' + Ansible module for zabbix items +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix item ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def get_app_ids(zapi, application_names): + ''' get application ids from names + ''' + if isinstance(application_names, str): + application_names = [application_names] + app_ids = [] + for app_name in application_names: + content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) + if content.has_key('result'): + app_ids.append(content['result'][0]['applicationid']) + return app_ids + +def main(): + ''' + ansible zabbix module for zbx_item + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + template_name=dict(default=None, type='str'), + zabbix_type=dict(default=2, type='int'), + value_type=dict(default='int', type='str'), + applications=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'item' + idname = "itemid" + state = module.params['state'] + key = module.params['key'] + template_name = module.params['template_name'] + + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + templateid = None + if content['result']: + templateid = content['result'][0]['templateid'] + else: + module.exit_json(changed=False, + results='Error: Could find template with name %s for item.' 
% template_name, + state="Unkown") + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'key_': key}, + 'selectApplications': 'applicationid', + }) + + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': module.params.get('name', module.params['key']), + 'key_': key, + 'hostid': templateid, + 'type': module.params['zabbix_type'], + 'value_type': get_value_type(module.params['value_type']), + 'applications': get_app_ids(zapi, module.params['applications']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py new file mode 100644 index 000000000..f0eb6bbbd --- /dev/null +++ b/roles/lib_zabbix/library/zbx_itemprototype.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python +''' +Zabbix discovery rule ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rule_id(zapi, discoveryrule_name): + '''get a discoveryrule by name + ''' + content = zapi.get_content('discoveryrule', + 'get', + {'search': {'name': discoveryrule_name}, + 'output': 'extend', + }) + if not content['result']: + return None + return content['result'][0]['itemid'] + +def get_template(zapi, template_name): + '''get a template by name + ''' + content = zapi.get_content('template', + 'get', + {'search': {'host': template_name}, + 'output': 'extend', + 'selectInterfaces': 'interfaceid', + }) + if not content['result']: + return None + return content['result'][0] + +def get_type(ztype): + ''' + Determine which type of discoverrule this is + ''' + _types = {'agent': 0, + 'SNMPv1': 1, + 'trapper': 2, + 'simple': 3, + 'SNMPv2': 4, + 'internal': 5, + 'SNMPv3': 6, + 'active': 7, + 'aggregate': 8, + 'external': 10, + 'database monitor': 11, + 'ipmi': 12, + 'ssh': 13, + 'telnet': 14, + 'calculated': 15, + 'JMX': 16, + } + + for typ in _types.keys(): + if ztype in typ or ztype == typ: + _vtype = _types[typ] + break + else: + _vtype = 2 + + return _vtype + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def get_status(status): + ''' Determine status + ''' + _status = 0 + if status == 'disabled': + _status = 1 + elif status == 'unsupported': + _status = 3 + + return _status + +def get_app_ids(zapi, application_names): + ''' get application ids from names + ''' + app_ids = [] + for app_name in application_names: + content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) + if content.has_key('result'): + app_ids.append(content['result'][0]['applicationid']) + return app_ids + +def main(): + ''' + Ansible module for zabbix discovery rules + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + interfaceid=dict(default=None, type='int'), + ztype=dict(default='trapper', type='str'), + value_type=dict(default='float', type='str'), + delay=dict(default=60, type='int'), + lifetime=dict(default=30, type='int'), + template_name=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + status=dict(default='enabled', type='str'), + discoveryrule_name=dict(default=None, type='str'), + applications=dict(default=[], type='list'), + ), + #supports_check_mode=True + ) + + user = module.params['user'] + passwd = module.params['password'] + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'itemprototype' + idname = "itemid" + dname = module.params['name'] + state = module.params['state'] + + # 
selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': dname}, + 'selectApplications': 'applicationid', + 'selectDiscoveryRule': 'itemid', + #'selectDhosts': 'dhostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + template = get_template(zapi, module.params['template_name']) + params = {'name': dname, + 'key_': module.params['key'], + 'hostid': template['templateid'], + 'interfaceid': module.params['interfaceid'], + 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']), + 'type': get_type(module.params['ztype']), + 'value_type': get_value_type(module.params['value_type']), + 'applications': get_app_ids(zapi, module.params['applications']), + } + if params['type'] in [2, 5, 7, 8, 11, 15]: + params.pop('interfaceid') + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'ruleid': + if value != zab_results['discoveryRule']['itemid']: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_mediatype.py b/roles/lib_zabbix/library/zbx_mediatype.py new file mode 100644 index 000000000..b8dcaf7aa --- /dev/null +++ b/roles/lib_zabbix/library/zbx_mediatype.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python +''' + Ansible module for mediatype +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix mediatype ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
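+# Example playbook usage (illustrative sketch built from the argument spec
+# below; the description and SMTP values are placeholders, not taken from
+# this repository):
+#
+#   - name: Ensure an email media type exists
+#     zbx_mediatype:
+#       server: "{{ zbx_server }}"
+#       user: "{{ zbx_user }}"
+#       password: "{{ zbx_password }}"
+#       description: 'ops email'
+#       mtype: email
+#       smtp_server: 'smtp.example.com'
+#       smtp_helo: 'example.com'
+#       smtp_email: 'zabbix@example.com'
+#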
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_mtype(mtype): + ''' + Transport used by the media type. + Possible values: + 0 - email; + 1 - script; + 2 - SMS; + 3 - Jabber; + 100 - Ez Texting. + ''' + mtype = mtype.lower() + media_type = None + if mtype == 'script': + media_type = 1 + elif mtype == 'sms': + media_type = 2 + elif mtype == 'jabber': + media_type = 3 + elif mtype == 'script': + media_type = 100 + else: + media_type = 0 + + return media_type + +def main(): + ''' + Ansible zabbix module for mediatype + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + description=dict(default=None, type='str'), + mtype=dict(default=None, type='str'), + smtp_server=dict(default=None, type='str'), + smtp_helo=dict(default=None, type='str'), + smtp_email=dict(default=None, type='str'), + passwd=dict(default=None, type='str'), + path=dict(default=None, type='str'), + username=dict(default=None, type='str'), + status=dict(default='enabled', type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'mediatype' + idname = "mediatypeid" + description = module.params['description'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}}) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + status = 1 + if module.params['status']: + status = 0 + params = {'description': description, + 'type': get_mtype(module.params['mtype']), + 'smtp_server': module.params['smtp_server'], + 'smtp_helo': module.params['smtp_helo'], + 'smtp_email': module.params['smtp_email'], + 'passwd': module.params['passwd'], + 'exec_path': module.params['path'], + 'username': module.params['username'], + 'status': status, + } + + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and \ 
+ zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_template.py b/roles/lib_zabbix/library/zbx_template.py new file mode 100644 index 000000000..f86f22003 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_template.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +''' +Ansible module for template +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix template ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
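+# Example playbook usage (illustrative; mirrors the template task in
+# roles/os_zabbix/library/test.yml, which is removed later in this series):
+#
+#   - name: Create a template
+#     zbx_template:
+#       server: "{{ zbx_server }}"
+#       user: "{{ zbx_user }}"
+#       password: "{{ zbx_password }}"
+#       name: 'test template'
+#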
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' Ansible module for template + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zbc = ZabbixConnection(module.params['server'], + module.params['user'], + module.params['password'], + module.params['debug']) + zapi = ZabbixAPI(zbc) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'template' + idname = 'templateid' + tname = module.params['name'] + state = module.params['state'] + # get a template, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': tname}, + 'selectParentTemplates': 'templateid', + 'selectGroups': 'groupid', + 'selectApplications': 'applicationid', + 'selectDiscoveries': 'extend', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + if not tname: + module.exit_json(failed=True, + changed=False, + results='Must specifiy a template name.', + state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'groups': module.params.get('groups', [{'groupid': '1'}]), + 'host': tname, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=content['result'], state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py new file mode 100644 index 000000000..6f5392437 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_trigger.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +''' +ansible module for zabbix triggers +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix trigger ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_priority(priority): + ''' determine priority + ''' + prior = 0 + if 'info' in priority: + prior = 1 + elif 'warn' in priority: + prior = 2 + elif 'avg' == priority or 'ave' in priority: + prior = 3 + elif 'high' in priority: + prior = 4 + elif 'dis' in priority: + prior = 5 + + return prior + +def get_deps(zapi, deps): + ''' get trigger dependencies + ''' + results = [] + for desc in deps: + content = zapi.get_content('trigger', + 'get', + {'search': {'description': desc}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if content.has_key('result'): + results.append({'triggerid': content['result'][0]['triggerid']}) + + return results + +def main(): + ''' + Create a trigger in zabbix + + Example: + "params": { + "description": "Processor load is too high on {HOST.NAME}", + "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", + "dependencies": [ + { + "triggerid": "14062" + } + ] + }, + + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + expression=dict(default=None, type='str'), + description=dict(default=None, type='str'), + dependencies=dict(default=[], type='list'), + priority=dict(default='avg', type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'trigger' + idname = "triggerid" + state = module.params['state'] + description = module.params['description'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'description': 
description}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'description': description, + 'expression': module.params['expression'], + 'dependencies': get_deps(zapi, module.params['dependencies']), + 'priority': get_priority(module.params['priority']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_user.py b/roles/lib_zabbix/library/zbx_user.py new file mode 100644 index 000000000..220caa6f1 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_user.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +''' +ansible module for zabbix users +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix user ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
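+# Example playbook usage (illustrative; adapted from the user task in
+# roles/os_zabbix/library/test.yml, which is removed later in this series --
+# note the group list parameter here is 'user_groups', per the argument spec
+# below):
+#
+#   - name: Create a user
+#     zbx_user:
+#       server: "{{ zbx_server }}"
+#       user: "{{ zbx_user }}"
+#       password: "{{ zbx_password }}"
+#       alias: kenny user
+#       passwd: zabbix
+#       user_groups:
+#       - kenny usergroup
+#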
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_usergroups(zapi, usergroups): + ''' Get usergroups + ''' + ugroups = [] + for ugr in usergroups: + content = zapi.get_content('usergroup', + 'get', + {'search': {'name': ugr}, + #'selectUsers': 'userid', + #'getRights': 'extend' + }) + if content['result']: + ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) + + return ugroups or None + +def get_usertype(user_type): + ''' + Determine zabbix user account type + ''' + if not user_type: + return None + + utype = 1 + if 'super' in user_type: + utype = 3 + elif 'admin' in user_type or user_type == 'admin': + utype = 2 + + return utype + +def main(): + ''' + ansible zabbix module for users + ''' + + ##def user(self, name, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + alias=dict(default=None, type='str'), + name=dict(default=None, type='str'), + surname=dict(default=None, type='str'), + user_type=dict(default=None, type='str'), + passwd=dict(default=None, type='str'), + user_groups=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + password = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, password, module.params['debug'])) + + ## before we can create a user media and users with media types we need media + zbx_class_name = 'user' + idname = "userid" + alias = module.params['alias'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'output': 'extend', + 'search': {'alias': alias}, + "selectUsrgrps": 'usergrpid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content) or len(content['result']) == 0: + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'alias': alias, + 'passwd': module.params['passwd'], + 'usrgrps': get_usergroups(zapi, module.params['user_groups']), + 'name': module.params['name'], + 'surname': module.params['surname'], + 'type': get_usertype(module.params['user_type']), + } + + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'passwd': + differences[key] = value + + elif key == 'usrgrps': + # this must be done as a list of ordered dictionaries fails 
comparison + if not all([True for _ in zab_results[key][0] if _ in value[0]]): + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_user_media.py b/roles/lib_zabbix/library/zbx_user_media.py new file mode 100644 index 000000000..f590c58fe --- /dev/null +++ b/roles/lib_zabbix/library/zbx_user_media.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +''' + Ansible module for user media +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix user media ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_mtype(zapi, mtype): + '''Get mediatype + + If passed an int, return it as the mediatypeid + if its a string, then try to fetch through a description + ''' + if isinstance(mtype, int): + return mtype + try: + return int(mtype) + except ValueError: + pass + + content = zapi.get_content('mediatype', 'get', {'search': {'description': mtype}}) + if content.has_key['result'] and content['result']: + return content['result'][0]['mediatypeid'] + + return None + +def get_user(zapi, user): + ''' Get userids from user aliases + ''' + content = zapi.get_content('user', 'get', {'search': {'alias': user}}) + if content['result']: + return content['result'][0] + + return None + +def get_severity(severity): + ''' determine severity + ''' + if isinstance(severity, int) or \ + isinstance(severity, str): + return severity + + val = 0 + sev_map = { + 'not': 2**0, + 'inf': 2**1, + 'war': 2**2, + 'ave': 2**3, + 'avg': 2**3, + 'hig': 2**4, + 'dis': 2**5, + } + for level in severity: + val |= sev_map[level[:3].lower()] + return val + +def get_zbx_user_query_data(zapi, user_name): + ''' If name exists, retrieve it, and build query params. 
+ ''' + query = {} + if user_name: + zbx_user = get_user(zapi, user_name) + query = {'userids': zbx_user['userid']} + + return query + +def find_media(medias, user_media): + ''' Find the user media in the list of medias + ''' + for media in medias: + if all([media[key] == user_media[key] for key in user_media.keys()]): + return media + return None + +def get_active(in_active): + '''Determine active value + ''' + active = 1 + if in_active: + active = 0 + + return active + +def main(): + ''' + Ansible zabbix module for mediatype + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + active=dict(default=False, type='bool'), + medias=dict(default=None, type='list'), + mediaid=dict(default=None, type='int'), + mediatype=dict(default=None, type='str'), + mediatype_desc=dict(default=None, type='str'), + #d-d,hh:mm-hh:mm;d-d,hh:mm-hh:mm... + period=dict(default=None, type='str'), + sendto=dict(default=None, type='str'), + severity=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'user' + idname = "mediaid" + state = module.params['state'] + + # User media is fetched through the usermedia.get + zbx_user_query = get_zbx_user_query_data(zapi, module.params['name']) + content = zapi.get_content('usermedia', 'get', zbx_user_query) + + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content) or len(content['result']) == 0: + module.exit_json(changed=False, state="absent") + + # TODO: Do we remove all the queried results? This could be catastrophic or desired. 
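+        # As written, only the first matching media entry is removed per run;
+        # the commented-out list comprehension below would remove all of them.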
+ #ids = [med[idname] for med in content['result']] + ids = [content['result'][0][idname]] + content = zapi.get_content(zbx_class_name, 'deletemedia', ids) + + if content.has_key('error'): + module.exit_json(changed=False, results=content['error'], state="absent") + + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + active = get_active(module.params['active']) + mtypeid = None + if module.params['mediatype']: + mtypeid = get_mtype(zapi, module.params['mediatype']) + elif module.params['mediatype_desc']: + mtypeid = get_mtype(zapi, module.params['mediatype_desc']) + + medias = module.params['medias'] + if medias == None: + medias = [{'mediatypeid': mtypeid, + 'sendto': module.params['sendto'], + 'active': active, + 'severity': int(get_severity(module.params['severity'])), + 'period': module.params['period'], + }] + + params = {'users': [zbx_user_query], + 'medias': medias, + 'output': 'extend', + } + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'addmedia', params) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state='present') + + # mediaid signifies an update + # If user params exists, check to see if they already exist in zabbix + # if they exist, then return as no update + # elif they do not exist, then take user params only + differences = {'medias': [], 'users': {}} + for media in params['medias']: + m_result = find_media(content['result'], media) + if not m_result: + differences['medias'].append(media) + + if not differences['medias']: + module.exit_json(changed=False, results=content['result'], state="present") + + for user in params['users']: + differences['users']['userid'] = user['userids'] + + # We have differences and need to update + content = zapi.get_content(zbx_class_name, 'updatemedia', differences) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_usergroup.py b/roles/lib_zabbix/library/zbx_usergroup.py new file mode 100644 index 000000000..11aef106c --- /dev/null +++ b/roles/lib_zabbix/library/zbx_usergroup.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python +''' +zabbix ansible module for usergroups +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix usergroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. 
+# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rights(zapi, rights): + '''Get rights + ''' + if rights == None: + return None + + perms = [] + for right in rights: + hstgrp = right.keys()[0] + perm = right.values()[0] + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) + if content['result']: + permission = 0 + if perm == 'ro': + permission = 2 + elif perm == 'rw': + permission = 3 + perms.append({'id': content['result'][0]['groupid'], + 'permission': permission}) + return perms + +def get_gui_access(access): + ''' Return the gui_access for a usergroup + ''' + access = access.lower() + if access == 'internal': + return 1 + elif access == 'disabled': + return 2 + + return 0 + +def get_debug_mode(mode): + ''' Return the debug_mode for a usergroup + ''' + mode = mode.lower() + if mode == 'enabled': + return 1 + + return 0 + +def get_user_status(status): + ''' Return the user_status for a usergroup + ''' + status = status.lower() + if status == 'enabled': + return 0 + + return 1 + + +#def get_userids(zapi, users): +# ''' Get userids from user aliases +# ''' +# if not users: +# return None +# +# userids = [] +# for alias in users: +# content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) +# if content['result']: +# userids.append(content['result'][0]['userid']) +# +# return userids + +def main(): + ''' Ansible module for usergroup + ''' + + ##def usergroup(self, name, rights=None, users=None, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), + password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), + debug_mode=dict(default='disabled', type='str'), + gui_access=dict(default='default', type='str'), + status=dict(default='enabled', type='str'), + name=dict(default=None, type='str', required=True), + rights=dict(default=None, type='list'), + #users=dict(default=None, type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], + module.params['user'], + module.params['password'], + module.params['debug'])) + + zbx_class_name = 'usergroup' + idname = "usrgrpid" + uname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': uname}, + 'selectUsers': 'userid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + if not uname: + module.exit_json(failed=True, changed=False, results='Need to pass in a user.', state="error") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + + params = {'name': uname, + 'rights': get_rights(zapi, 
module.params['rights']), + 'users_status': get_user_status(module.params['status']), + 'gui_access': get_gui_access(module.params['gui_access']), + 'debug_mode': get_debug_mode(module.params['debug_mode']), + #'userids': get_userids(zapi, module.params['users']), + } + + _ = [params.pop(key, None) for key in params.keys() if params[key] == None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'rights': + differences['rights'] = value + + #elif key == 'userids' and zab_results.has_key('users'): + #if zab_results['users'] != value: + #differences['userids'] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/README.md b/roles/os_zabbix/README.md new file mode 100644 index 000000000..ac3dc2833 --- /dev/null +++ b/roles/os_zabbix/README.md @@ -0,0 +1,40 @@ +os_zabbix +========= + +Automate zabbix tasks. + +Requirements +------------ + +This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. + +Role Variables +-------------- + +zab_server +zab_username +zab_password + +Dependencies +------------ + +This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. + +Example Playbook +---------------- + + - zbx_host: + server: zab_server + user: zab_user + password: zab_password + name: 'myhost' + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift operations, Red Hat, Inc diff --git a/roles/os_zabbix/defaults/main.yml b/roles/os_zabbix/defaults/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/os_zabbix/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/os_zabbix/handlers/main.yml b/roles/os_zabbix/handlers/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/os_zabbix/handlers/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/os_zabbix/library/__init__.py b/roles/os_zabbix/library/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/os_zabbix/library/get_drule.yml b/roles/os_zabbix/library/get_drule.yml deleted file mode 100644 index a3e39f535..000000000 --- a/roles/os_zabbix/library/get_drule.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -# This is a test playbook to create one of each of the zabbix ansible modules. 
-# ensure that the zbxapi module is installed -# ansible-playbook test.yml -- name: Test zabbix ansible module - hosts: localhost - gather_facts: no - vars: -#zbx_server: https://localhost/zabbix/api_jsonrpc.php -#zbx_user: Admin -#zbx_password: zabbix - - pre_tasks: - - name: Template Discovery rules - zbx_template: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Template App HaProxy' - state: list - register: template_output - - - debug: var=template_output - - - name: Discovery rules - zbx_discovery_rule: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'haproxy.discovery sender' - state: list - register: drule - - - debug: var=drule - -# - name: Create an application -# zbx_application: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'Test App' -# template_name: "test template" -# register: item_output -# -# - name: Create an item -# zbx_item: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'test item' -# key: 'kenny.item.1' -# applications: -# - 'Test App' -# template_name: "test template" -# register: item_output -# -# - debug: var=item_output -# -# - name: Create an trigger -# zbx_trigger: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# expression: '{test template:kenny.item.1.last()}>2' -# description: 'Kenny desc' -# register: trigger_output -# -# - debug: var=trigger_output -# -# - name: Create a hostgroup -# zbx_hostgroup: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'kenny hostgroup' -# register: hostgroup_output -# -# - debug: var=hostgroup_output -# -# - name: Create a host -# zbx_host: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'kenny host' -# template_names: -# - test template -# hostgroup_names: -# - kenny hostgroup -# register: host_output -# -# - debug: var=host_output -# -# - name: Create a usergroup -# zbx_usergroup: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: kenny usergroup -# rights: -# - 'kenny hostgroup': rw -# register: usergroup_output -# -# - debug: var=usergroup_output -# -# - name: Create a user -# zbx_user: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# alias: kwoodson -# state: list -# register: user_output -# -# - debug: var=user_output diff --git a/roles/os_zabbix/library/test.yml b/roles/os_zabbix/library/test.yml deleted file mode 100644 index cedace1a0..000000000 --- a/roles/os_zabbix/library/test.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -# This is a test playbook to create one of each of the zabbix ansible modules. 
-# ensure that the zbxapi module is installed -# ansible-playbook test.yml -- name: Test zabbix ansible module - hosts: localhost - gather_facts: no - vars: - zbx_server: http://localhost:8080/zabbix/api_jsonrpc.php - zbx_user: Admin - zbx_password: zabbix - - pre_tasks: - - name: Create a template - zbx_template: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'test template' - register: template_output - - - debug: var=template_output - - - name: Create a discoveryrule - zbx_discoveryrule: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: test discoverule - key: test_listener - template_name: test template - lifetime: 14 - register: discoveryrule - - - debug: var=discoveryrule - - - name: Create an itemprototype - zbx_itemprototype: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Test itemprototype on {#TEST_LISTENER}' - key: 'test[{#TEST_LISTENER}]' - template_name: test template - discoveryrule_name: test discoverule - register: itemproto - - - debug: var=itemproto - - - name: Create an application - zbx_application: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Test App' - template_name: "test template" - register: item_output - - - name: Create an item - zbx_item: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'test item' - key: 'kenny.item.1' - applications: - - 'Test App' - template_name: "test template" - register: item_output - - - debug: var=item_output - - - name: Create an trigger - zbx_trigger: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - expression: '{test template:kenny.item.1.last()}>2' - description: 'Kenny desc' - register: trigger_output - - - debug: var=trigger_output - - - name: Create a hostgroup - zbx_hostgroup: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'kenny hostgroup' - register: hostgroup_output - - - debug: var=hostgroup_output - - - name: Create a host - zbx_host: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'kenny host' - template_names: - - test template - hostgroup_names: - - kenny hostgroup - register: host_output - - - debug: var=host_output - - - name: Create a usergroup - zbx_usergroup: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: kenny usergroup - rights: - - 'kenny hostgroup': rw - register: usergroup_output - - - debug: var=usergroup_output - - - name: Create a user - zbx_user: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - alias: kenny user - passwd: zabbix - usergroups: - - kenny usergroup - register: user_output - - - debug: var=user_output diff --git a/roles/os_zabbix/library/zbx_application.py b/roles/os_zabbix/library/zbx_application.py deleted file mode 100644 index 5d4acf72d..000000000 --- a/roles/os_zabbix/library/zbx_application.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python -''' -Ansible module for application -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix application ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_template_ids(zapi, template_names): - ''' - get related templates - ''' - template_ids = [] - # Fetch templates by name - for template_name in template_names: - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - if content.has_key('result'): - template_ids.append(content['result'][0]['templateid']) - return template_ids - -def main(): - ''' Ansible module for application - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - template_name=dict(default=None, type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the application for the rest of the calls - zbx_class_name = 'application' - idname = 'applicationid' - aname = module.params['name'] - state = module.params['state'] - # get a applicationid, see if it exists - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': aname}, - 'selectHost': 'hostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0], - 'name': aname, - } - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - elif zab_results[key] != str(value) and zab_results[key] != value: - differences[key] = value - - if not differences: - module.exit_json(changed=False, 
results=content['result'], state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_discoveryrule.py b/roles/os_zabbix/library/zbx_discoveryrule.py deleted file mode 100644 index 56b87fecc..000000000 --- a/roles/os_zabbix/library/zbx_discoveryrule.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix discovery rule ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_template(zapi, template_name): - '''get a template by name - ''' - content = zapi.get_content('template', - 'get', - {'search': {'host': template_name}, - 'output': 'extend', - 'selectInterfaces': 'interfaceid', - }) - if not content['result']: - return None - return content['result'][0] - -def get_type(vtype): - ''' - Determine which type of discoverrule this is - ''' - _types = {'agent': 0, - 'SNMPv1': 1, - 'trapper': 2, - 'simple': 3, - 'SNMPv2': 4, - 'internal': 5, - 'SNMPv3': 6, - 'active': 7, - 'external': 10, - 'database monitor': 11, - 'ipmi': 12, - 'ssh': 13, - 'telnet': 14, - 'JMX': 16, - } - - for typ in _types.keys(): - if vtype in typ or vtype == typ: - _vtype = _types[typ] - break - else: - _vtype = 2 - - return _vtype - -def main(): - ''' - Ansible module for zabbix discovery rules - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), - delay=dict(default=60, type='int'), - lifetime=dict(default=30, type='int'), - template_name=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - 
#supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'discoveryrule' - idname = "itemid" - dname = module.params['name'] - state = module.params['state'] - - # selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': dname}, - #'selectDServices': 'extend', - #'selectDChecks': 'extend', - #'selectDhosts': 'dhostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - template = get_template(zapi, module.params['template_name']) - params = {'name': dname, - 'key_': module.params['key'], - 'hostid': template['templateid'], - 'interfaceid': module.params['interfaceid'], - 'lifetime': module.params['lifetime'], - 'type': get_type(module.params['ztype']), - } - if params['type'] in [2, 5, 7, 11]: - params.pop('interfaceid') - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_host.py b/roles/os_zabbix/library/zbx_host.py deleted file mode 100644 index 12c5f3456..000000000 --- a/roles/os_zabbix/library/zbx_host.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix host ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. 
-# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_group_ids(zapi, hostgroup_names): - ''' - get hostgroups - ''' - # Fetch groups by name - group_ids = [] - for hgr in hostgroup_names: - content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) - if content.has_key('result'): - group_ids.append({'groupid': content['result'][0]['groupid']}) - - return group_ids - -def get_template_ids(zapi, template_names): - ''' - get related templates - ''' - template_ids = [] - # Fetch templates by name - for template_name in template_names: - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - if content.has_key('result'): - template_ids.append({'templateid': content['result'][0]['templateid']}) - return template_ids - -def main(): - ''' - Ansible module for zabbix host - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - hostgroup_names=dict(default=[], type='list'), - template_names=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - interfaces=dict(default=None, type='list'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'host' - idname = "hostid" - hname = module.params['name'] - state = module.params['state'] - - # selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': hname}, - 'selectGroups': 'groupid', - 'selectParentTemplates': 'templateid', - 'selectInterfaces': 'interfaceid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - ifs = module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent - 'main': 1, # default interface? 1 = true - 'useip': 1, # default interface? 1 = true - 'ip': '127.0.0.1', # default interface? 1 = true - 'dns': '', # dns for host - 'port': '10050', # port for interface? 
10050 - }] - params = {'host': hname, - 'groups': get_group_ids(zapi, module.params['hostgroup_names']), - 'templates': get_template_ids(zapi, module.params['template_names']), - 'interfaces': ifs, - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_hostgroup.py b/roles/os_zabbix/library/zbx_hostgroup.py deleted file mode 100644 index a1eb875d4..000000000 --- a/roles/os_zabbix/library/zbx_hostgroup.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python -''' Ansible module for hostgroup -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix hostgroup ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def main(): - ''' ansible module for hostgroup - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'hostgroup' - idname = "groupid" - hname = module.params['name'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': hname}, - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': hname} - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py deleted file mode 100644 index 45ba6c2b0..000000000 --- a/roles/os_zabbix/library/zbx_item.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for zabbix items -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix item ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_value_type(value_type): - ''' - Possible values: - 0 - numeric float; - 1 - character; - 2 - log; - 3 - numeric unsigned; - 4 - text - ''' - vtype = 0 - if 'int' in value_type: - vtype = 3 - elif 'char' in value_type: - vtype = 1 - elif 'str' in value_type: - vtype = 4 - - return vtype - -def get_app_ids(zapi, application_names): - ''' get application ids from names - ''' - app_ids = [] - for app_name in application_names: - content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) - if content.has_key('result'): - app_ids.append(content['result'][0]['applicationid']) - return app_ids - -def main(): - ''' - ansible zabbix module for zbx_item - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - template_name=dict(default=None, type='str'), - zabbix_type=dict(default=2, type='int'), - value_type=dict(default='int', type='str'), - applications=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'item' - idname = "itemid" - state = module.params['state'] - key = module.params['key'] - template_name = module.params['template_name'] - - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - templateid = None - if content['result']: - templateid = content['result'][0]['templateid'] - else: - module.exit_json(changed=False, - results='Error: Could find template with name %s for item.' 
% template_name, - state="Unkown") - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'key_': key}, - 'selectApplications': 'applicationid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': module.params.get('name', module.params['key']), - 'key_': key, - 'hostid': templateid, - 'type': module.params['zabbix_type'], - 'value_type': get_value_type(module.params['value_type']), - 'applications': get_app_ids(zapi, module.params['applications']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_itemprototype.py b/roles/os_zabbix/library/zbx_itemprototype.py deleted file mode 100644 index f0eb6bbbd..000000000 --- a/roles/os_zabbix/library/zbx_itemprototype.py +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix discovery rule ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_rule_id(zapi, discoveryrule_name): - '''get a discoveryrule by name - ''' - content = zapi.get_content('discoveryrule', - 'get', - {'search': {'name': discoveryrule_name}, - 'output': 'extend', - }) - if not content['result']: - return None - return content['result'][0]['itemid'] - -def get_template(zapi, template_name): - '''get a template by name - ''' - content = zapi.get_content('template', - 'get', - {'search': {'host': template_name}, - 'output': 'extend', - 'selectInterfaces': 'interfaceid', - }) - if not content['result']: - return None - return content['result'][0] - -def get_type(ztype): - ''' - Determine which type of discoverrule this is - ''' - _types = {'agent': 0, - 'SNMPv1': 1, - 'trapper': 2, - 'simple': 3, - 'SNMPv2': 4, - 'internal': 5, - 'SNMPv3': 6, - 'active': 7, - 'aggregate': 8, - 'external': 10, - 'database monitor': 11, - 'ipmi': 12, - 'ssh': 13, - 'telnet': 14, - 'calculated': 15, - 'JMX': 16, - } - - for typ in _types.keys(): - if ztype in typ or ztype == typ: - _vtype = _types[typ] - break - else: - _vtype = 2 - - return _vtype - -def get_value_type(value_type): - ''' - Possible values: - 0 - numeric float; - 1 - character; - 2 - log; - 3 - numeric unsigned; - 4 - text - ''' - vtype = 0 - if 'int' in value_type: - vtype = 3 - elif 'char' in value_type: - vtype = 1 - elif 'str' in value_type: - vtype = 4 - - return vtype - -def get_status(status): - ''' Determine status - ''' - _status = 0 - if status == 'disabled': - _status = 1 - elif status == 'unsupported': - _status = 3 - - return _status - -def get_app_ids(zapi, application_names): - ''' get application ids from names - ''' - app_ids = [] - for app_name in application_names: - content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) - if content.has_key('result'): - app_ids.append(content['result'][0]['applicationid']) - return app_ids - -def main(): - ''' - Ansible module for zabbix discovery rules - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), - value_type=dict(default='float', type='str'), - delay=dict(default=60, type='int'), - lifetime=dict(default=30, type='int'), - template_name=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - status=dict(default='enabled', type='str'), - discoveryrule_name=dict(default=None, type='str'), - applications=dict(default=[], type='list'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'itemprototype' - idname = "itemid" - dname = module.params['name'] - state = module.params['state'] - - # 
selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': dname}, - 'selectApplications': 'applicationid', - 'selectDiscoveryRule': 'itemid', - #'selectDhosts': 'dhostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - template = get_template(zapi, module.params['template_name']) - params = {'name': dname, - 'key_': module.params['key'], - 'hostid': template['templateid'], - 'interfaceid': module.params['interfaceid'], - 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']), - 'type': get_type(module.params['ztype']), - 'value_type': get_value_type(module.params['value_type']), - 'applications': get_app_ids(zapi, module.params['applications']), - } - if params['type'] in [2, 5, 7, 8, 11, 15]: - params.pop('interfaceid') - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if key == 'ruleid': - if value != zab_results['discoveryRule']['itemid']: - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_mediatype.py b/roles/os_zabbix/library/zbx_mediatype.py deleted file mode 100644 index a49aecd0f..000000000 --- a/roles/os_zabbix/library/zbx_mediatype.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for mediatype -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix mediatype ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True -def get_mtype(mtype): - ''' - Transport used by the media type. - Possible values: - 0 - email; - 1 - script; - 2 - SMS; - 3 - Jabber; - 100 - Ez Texting. - ''' - mtype = mtype.lower() - media_type = None - if mtype == 'script': - media_type = 1 - elif mtype == 'sms': - media_type = 2 - elif mtype == 'jabber': - media_type = 3 - elif mtype == 'script': - media_type = 100 - else: - media_type = 0 - - return media_type - -def main(): - ''' - Ansible zabbix module for mediatype - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - description=dict(default=None, type='str'), - mtype=dict(default=None, type='str'), - smtp_server=dict(default=None, type='str'), - smtp_helo=dict(default=None, type='str'), - smtp_email=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'mediatype' - idname = "mediatypeid" - description = module.params['description'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}}) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'description': description, - 'type': get_mtype(module.params['media_type']), - 'smtp_server': module.params['smtp_server'], - 'smtp_helo': module.params['smtp_helo'], - 'smtp_email': module.params['smtp_email'], - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if zab_results[key] != value and \ - zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. 
%s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_template.py b/roles/os_zabbix/library/zbx_template.py deleted file mode 100644 index 20ea48a85..000000000 --- a/roles/os_zabbix/library/zbx_template.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -''' -Ansible module for template -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix template ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def main(): - ''' Ansible module for template - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zbc = ZabbixConnection(module.params['server'], user, passwd, module.params['debug']) - zapi = ZabbixAPI(zbc) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'template' - idname = 'templateid' - tname = module.params['name'] - state = module.params['state'] - # get a template, see if it exists - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': tname}, - 'selectParentTemplates': 'templateid', - 'selectGroups': 'groupid', - 'selectApplications': 'applicationid', - 'selectDiscoveries': 'extend', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'groups': module.params.get('groups', [{'groupid': '1'}]), - 'host': tname, - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already 
exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - elif zab_results[key] != str(value) and zab_results[key] != value: - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=content['result'], state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_trigger.py b/roles/os_zabbix/library/zbx_trigger.py deleted file mode 100644 index 7cc9356c8..000000000 --- a/roles/os_zabbix/library/zbx_trigger.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/env python -''' -ansible module for zabbix triggers -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix trigger ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - - -def get_priority(priority): - ''' determine priority - ''' - prior = 0 - if 'info' in priority: - prior = 1 - elif 'warn' in priority: - prior = 2 - elif 'avg' == priority or 'ave' in priority: - prior = 3 - elif 'high' in priority: - prior = 4 - elif 'dis' in priority: - prior = 5 - - return prior - -def get_deps(zapi, deps): - ''' get trigger dependencies - ''' - results = [] - for desc in deps: - content = zapi.get_content('trigger', - 'get', - {'search': {'description': desc}, - 'expandExpression': True, - 'selectDependencies': 'triggerid', - }) - if content.has_key('result'): - results.append({'triggerid': content['result'][0]['triggerid']}) - - return results - -def main(): - ''' - Create a trigger in zabbix - - Example: - "params": { - "description": "Processor load is too high on {HOST.NAME}", - "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", - "dependencies": [ - { - "triggerid": "14062" - } - ] - }, - - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - expression=dict(default=None, type='str'), - description=dict(default=None, type='str'), - dependencies=dict(default=[], type='list'), - priority=dict(default='avg', type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'trigger' - idname = "triggerid" - state = module.params['state'] - description = module.params['description'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'description': description}, - 'expandExpression': True, - 'selectDependencies': 'triggerid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'description': description, - 'expression': module.params['expression'], - 'dependencies': get_deps(zapi, module.params['dependencies']), - 'priority': get_priority(module.params['priority']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, 
results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py deleted file mode 100644 index c45c9a75d..000000000 --- a/roles/os_zabbix/library/zbx_user.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env python -''' -ansible module for zabbix users -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix user ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_usergroups(zapi, usergroups): - ''' Get usergroups - ''' - ugroups = [] - for ugr in usergroups: - content = zapi.get_content('usergroup', - 'get', - {'search': {'name': ugr}, - #'selectUsers': 'userid', - #'getRights': 'extend' - }) - if content['result']: - ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) - - return ugroups or None - -def get_usertype(user_type): - ''' - Determine zabbix user account type - ''' - if not user_type: - return None - - utype = 1 - if 'super' in user_type: - utype = 3 - elif 'admin' in user_type or user_type == 'admin': - utype = 2 - - return utype - -def main(): - ''' - ansible zabbix module for users - ''' - - ##def user(self, name, state='present', params=None): - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - alias=dict(default=None, type='str'), - name=dict(default=None, type='str'), - surname=dict(default=None, type='str'), - user_type=dict(default=None, type='str'), - passwd=dict(default=None, type='str'), - usergroups=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - password = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = 
ZabbixAPI(ZabbixConnection(module.params['server'], user, password, module.params['debug'])) - - ## before we can create a user media and users with media types we need media - zbx_class_name = 'user' - idname = "userid" - alias = module.params['alias'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'output': 'extend', - 'search': {'alias': alias}, - "selectUsrgrps": 'usergrpid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'alias': alias, - 'passwd': module.params['passwd'], - 'usrgrps': get_usergroups(zapi, module.params['usergroups']), - 'name': module.params['name'], - 'surname': module.params['surname'], - 'type': get_usertype(module.params['user_type']), - } - - # Remove any None valued params - _ = [params.pop(key, None) for key in params.keys() if params[key] is None] - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'passwd': - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_usergroup.py b/roles/os_zabbix/library/zbx_usergroup.py deleted file mode 100644 index ede4c9df1..000000000 --- a/roles/os_zabbix/library/zbx_usergroup.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python -''' -zabbix ansible module for usergroups -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix usergroup ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_rights(zapi, rights): - '''Get rights - ''' - perms = [] - for right in rights: - hstgrp = right.keys()[0] - perm = right.values()[0] - content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) - if content['result']: - permission = 0 - if perm == 'ro': - permission = 2 - elif perm == 'rw': - permission = 3 - perms.append({'id': content['result'][0]['groupid'], - 'permission': permission}) - return perms - -def get_userids(zapi, users): - ''' Get userids from user aliases - ''' - userids = [] - for alias in users: - content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) - if content['result']: - userids.append(content['result'][0]['userid']) - - return userids - -def main(): - ''' Ansible module for usergroup - ''' - - ##def usergroup(self, name, rights=None, users=None, state='present', params=None): - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - rights=dict(default=[], type='list'), - users=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - zbx_class_name = 'usergroup' - idname = "usrgrpid" - uname = module.params['name'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': uname}, - 'selectUsers': 'userid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': uname, - 'rights': get_rights(zapi, module.params['rights']), - 'userids': get_userids(zapi, module.params['users']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'rights': - differences['rights'] = value - - elif key == 'userids' and zab_results.has_key('users'): - if zab_results['users'] != value: - differences['userids'] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = 
zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/meta/main.yml b/roles/os_zabbix/meta/main.yml new file mode 100644 index 000000000..360f5aad2 --- /dev/null +++ b/roles/os_zabbix/meta/main.yml @@ -0,0 +1,9 @@ +--- +galaxy_info: + author: OpenShift + description: ZabbixAPI + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: +- lib_zabbix diff --git a/roles/os_zabbix/tasks/clean_zabbix.yml b/roles/os_zabbix/tasks/clean_zabbix.yml new file mode 100644 index 000000000..1bcc07d42 --- /dev/null +++ b/roles/os_zabbix/tasks/clean_zabbix.yml @@ -0,0 +1,47 @@ +--- +- name: CLEAN List template for heartbeat + zbx_template: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + state: list + name: 'Template Heartbeat' + register: templ_heartbeat + +- name: CLEAN List template app zabbix server + zbx_template: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + state: list + name: 'Template App Zabbix Server' + register: templ_zabbix_server + +- name: CLEAN List template app zabbix server + zbx_template: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + state: list + name: 'Template App Zabbix Agent' + register: templ_zabbix_agent + +- name: CLEAN List all templates + zbx_template: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + state: list + register: templates + +- debug: var=templ_heartbeat.results + +- name: Remove templates if heartbeat template is missing + zbx_template: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + name: "{{ item }}" + state: absent + with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" + when: templ_heartbeat.results | length == 0 diff --git a/roles/os_zabbix/tasks/create_template.yml b/roles/os_zabbix/tasks/create_template.yml new file mode 100644 index 000000000..070390aba --- /dev/null +++ b/roles/os_zabbix/tasks/create_template.yml @@ -0,0 +1,56 @@ +--- +- debug: var=template + +- name: Template Create Template + zbx_template: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + name: "{{ template.name }}" + register: created_template + +- debug: var=created_template + +- name: Create Application + zbx_application: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + name: "{{ item }}" + template_name: "{{ template.name }}" + with_items: template.zapplications + register: created_application + when: template.zapplications is defined + +- debug: var=created_application + +- name: Create Items + zbx_item: + server: "{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + key: "{{ item.key }}" + name: "{{ item.name | default(item.key, true) }}" + value_type: "{{ item.value_type | default('int') }}" + template_name: "{{ template.name }}" + applications: "{{ item.application }}" + with_items: template.zitems + register: created_items + when: template.zitems is defined + +#- debug: var=ctp_created_items + +- name: Create Triggers + zbx_trigger: + server: 
"{{ server }}" + user: "{{ user }}" + password: "{{ password }}" + description: "{{ item.description }}" + expression: "{{ item.expression }}" + priority: "{{ item.priority }}" + with_items: template.ztriggers + when: template.ztriggers is defined + +#- debug: var=ctp_created_triggers + + diff --git a/roles/os_zabbix/tasks/create_user.yml b/roles/os_zabbix/tasks/create_user.yml new file mode 100644 index 000000000..1f752a9e1 --- /dev/null +++ b/roles/os_zabbix/tasks/create_user.yml @@ -0,0 +1,11 @@ +--- +- name: Update zabbix credentialss for a user + zbx_user: + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + alias: "{{ ozb_username }}" + passwd: "{{ ozb_new_password | default(ozb_password, true) }}" + register: user + +- debug: var=user.results diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml new file mode 100644 index 000000000..06c0a09fc --- /dev/null +++ b/roles/os_zabbix/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: Main List all templates + zbx_template: + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + state: list + register: templates + +- debug: var=templates + +- include_vars: template_heartbeat.yml +- include_vars: template_os_linux.yml + +- name: Include Template Heartbeat + include: create_template.yml + vars: + template: "{{ g_template_heartbeat }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + +#- name: Include Template os_linux +# include: create_template.yml +# vars: +# template: "{{ g_template_os_linux }}" +# server: "{{ ozb_server }}" +# user: "{{ ozb_user }}" +# password: "{{ ozb_password }}" + diff --git a/roles/os_zabbix/vars/main.yml b/roles/os_zabbix/vars/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/os_zabbix/vars/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/os_zabbix/vars/template_heartbeat.yml b/roles/os_zabbix/vars/template_heartbeat.yml new file mode 100644 index 000000000..158d6c79a --- /dev/null +++ b/roles/os_zabbix/vars/template_heartbeat.yml @@ -0,0 +1,13 @@ +--- +g_template_heartbeat: + name: Template Heartbeat + zapplications: + - Heartbeat + zitems: + - name: Heartbeat Ping + application: Heartbeat + key: heartbeat.ping + ztriggers: + - description: 'Heartbeat.ping has failed on {HOST.NAME}' + expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' + priority: avg diff --git a/roles/os_zabbix/vars/template_host.yml b/roles/os_zabbix/vars/template_host.yml new file mode 100644 index 000000000..e7cc667cb --- /dev/null +++ b/roles/os_zabbix/vars/template_host.yml @@ -0,0 +1,27 @@ +--- +g_template_host: + params: + name: Template Host + host: Template Host + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Host + zitems: + - name: Host Ping + hostid: + key_: host.ping + type: 2 + value_type: 0 + output: extend + search: + key_: host.ping + ztriggers: + - description: 'Host ping has failed on {HOST.NAME}' + expression: '{Template Host:host.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Host ping has failed on*' + expandExpression: True diff --git a/roles/os_zabbix/vars/template_master.yml b/roles/os_zabbix/vars/template_master.yml new file mode 100644 index 000000000..5f9b41a4f --- /dev/null +++ b/roles/os_zabbix/vars/template_master.yml @@ -0,0 +1,27 @@ +--- +g_template_master: + params: + name: Template Master + host: Template Master + groups: + - groupid: 1 # FIXME (not real) + output: 
extend + search: + name: Template Master + zitems: + - name: Master Etcd Ping + hostid: + key_: master.etcd.ping + type: 2 + value_type: 0 + output: extend + search: + key_: master.etcd.ping + ztriggers: + - description: 'Master Etcd ping has failed on {HOST.NAME}' + expression: '{Template Master:master.etcd.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Master Etcd ping has failed on*' + expandExpression: True diff --git a/roles/os_zabbix/vars/template_node.yml b/roles/os_zabbix/vars/template_node.yml new file mode 100644 index 000000000..98c343a24 --- /dev/null +++ b/roles/os_zabbix/vars/template_node.yml @@ -0,0 +1,27 @@ +--- +g_template_node: + params: + name: Template Node + host: Template Node + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Node + zitems: + - name: Kubelet Ping + hostid: + key_: kubelet.ping + type: 2 + value_type: 0 + output: extend + search: + key_: kubelet.ping + ztriggers: + - description: 'Kubelet ping has failed on {HOST.NAME}' + expression: '{Template Node:kubelet.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Kubelet ping has failed on*' + expandExpression: True diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml new file mode 100644 index 000000000..1cc928bce --- /dev/null +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -0,0 +1,143 @@ +--- +g_template_os_linux: + name: Template OS Linux + zapplications: + - Disk + - Memory + - Kernel + zitems: + - key: kernel.uname.sysname + application: Kernel + value_type: string + + - key: kernel.all.cpu.wait.total + application: Kernel + value_type: int + + - key: kernel.all.cpu.irq.hard + application: Kernel + value_type: int + + - key: kernel.all.cpu.idle + application: Kernel + value_type: int + + - key: kernel.uname.distro + application: Kernel + value_type: string + + - key: kernel.uname.nodename + application: Kernel + value_type: string + + - key: kernel.all.cpu.irq.soft + application: Kernel + value_type: int + + - key: kernel.all.load.15_minute + application: Kernel + value_type: float + + - key: kernel.all.cpu.sys + application: Kernel + value_type: int + + - key: kernel.all.load.5_minute + application: Kernel + value_type: float + + - key: mem.freemem + application: Memory + value_type: int + + - key: kernel.all.cpu.nice + application: Kernel + value_type: int + + - key: mem.util.bufmem + application: Memory + value_type: int + + - key: swap.used + application: Memory + value_type: int + + - key: kernel.all.load.1_minute + application: Kernel + value_type: float + + - key: kernel.uname.version + application: Kernel + value_type: string + + - key: swap.length + application: Memory + value_type: int + + - key: mem.physmem + application: Memory + value_type: int + + - key: kernel.all.uptime + application: Kernel + value_type: int + + - key: swap.free + application: Memory + value_type: int + + - key: mem.util.used + application: Memory + value_type: int + + - key: kernel.all.cpu.user + application: Kernel + value_type: int + + - key: kernel.uname.machine + application: Kernel + value_type: string + + - key: hinv.ncpu + application: Kernel + value_type: int + + - key: mem.util.cached + application: Memory + value_type: int + + - key: kernel.all.cpu.steal + application: Kernel + value_type: int + + - key: kernel.all.pswitch + application: Kernel + value_type: int + + - key: kernel.uname.release + application: Kernel + value_type: string + + - 
key: proc.nprocs + application: Kernel + value_type: int + + - key: filesys.avail + application: Disk + value_type: int + + - key: filesys.capacity + application: Disk + value_type: int + + - key: filesys.free + application: Disk + value_type: int + + - key: filesys.full + application: Disk + value_type: float + + - key: filesys.used + application: Disk + value_type: float diff --git a/roles/os_zabbix/vars/template_router.yml b/roles/os_zabbix/vars/template_router.yml new file mode 100644 index 000000000..4dae7da1e --- /dev/null +++ b/roles/os_zabbix/vars/template_router.yml @@ -0,0 +1,27 @@ +--- +g_template_router: + params: + name: Template Router + host: Template Router + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Router + zitems: + - name: Router Backends down + hostid: + key_: router.backends.down + type: 2 + value_type: 0 + output: extend + search: + key_: router.backends.down + ztriggers: + - description: 'Number of router backends down on {HOST.NAME}' + expression: '{Template Router:router.backends.down.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Number of router backends down on {HOST.NAME}' + expandExpression: True -- cgit v1.2.3 From 91eebb77744753bde5b4b83e7c7634ee47e5b859 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Thu, 27 Aug 2015 11:12:42 -0400 Subject: Revert "Zabbix API updates" --- filter_plugins/oo_zabbix_filters.py | 29 --- playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 51 +++++ playbooks/adhoc/zabbix_setup/create_template.yml | 57 +++++ playbooks/adhoc/zabbix_setup/create_user.yml | 31 +++ playbooks/adhoc/zabbix_setup/filter_plugins | 1 + playbooks/adhoc/zabbix_setup/roles | 1 + playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 38 ++++ .../adhoc/zabbix_setup/vars/template_heartbeat.yml | 11 + .../adhoc/zabbix_setup/vars/template_host.yml | 27 +++ .../adhoc/zabbix_setup/vars/template_master.yml | 27 +++ .../adhoc/zabbix_setup/vars/template_node.yml | 27 +++ .../adhoc/zabbix_setup/vars/template_os_linux.yml | 90 ++++++++ .../adhoc/zabbix_setup/vars/template_router.yml | 27 +++ roles/lib_zabbix/README.md | 38 ---- roles/lib_zabbix/library/__init__.py | 0 roles/lib_zabbix/library/test.yml | 131 ----------- roles/lib_zabbix/library/zbx_application.py | 139 ------------ roles/lib_zabbix/library/zbx_discoveryrule.py | 177 --------------- roles/lib_zabbix/library/zbx_host.py | 163 -------------- roles/lib_zabbix/library/zbx_hostgroup.py | 116 ---------- roles/lib_zabbix/library/zbx_item.py | 173 --------------- roles/lib_zabbix/library/zbx_itemprototype.py | 241 --------------------- roles/lib_zabbix/library/zbx_mediatype.py | 168 -------------- roles/lib_zabbix/library/zbx_template.py | 133 ------------ roles/lib_zabbix/library/zbx_trigger.py | 174 --------------- roles/lib_zabbix/library/zbx_user.py | 174 --------------- roles/lib_zabbix/library/zbx_user_media.py | 240 -------------------- roles/lib_zabbix/library/zbx_usergroup.py | 208 ------------------ roles/os_zabbix/README.md | 40 ---- roles/os_zabbix/defaults/main.yml | 1 - roles/os_zabbix/handlers/main.yml | 1 - roles/os_zabbix/library/__init__.py | 0 roles/os_zabbix/library/get_drule.yml | 115 ++++++++++ roles/os_zabbix/library/test.yml | 131 +++++++++++ roles/os_zabbix/library/zbx_application.py | 135 ++++++++++++ roles/os_zabbix/library/zbx_discoveryrule.py | 177 +++++++++++++++ roles/os_zabbix/library/zbx_host.py | 163 ++++++++++++++ roles/os_zabbix/library/zbx_hostgroup.py | 116 ++++++++++ roles/os_zabbix/library/zbx_item.py | 
170 +++++++++++++++ roles/os_zabbix/library/zbx_itemprototype.py | 241 +++++++++++++++++++++ roles/os_zabbix/library/zbx_mediatype.py | 149 +++++++++++++ roles/os_zabbix/library/zbx_template.py | 127 +++++++++++ roles/os_zabbix/library/zbx_trigger.py | 175 +++++++++++++++ roles/os_zabbix/library/zbx_user.py | 169 +++++++++++++++ roles/os_zabbix/library/zbx_usergroup.py | 160 ++++++++++++++ roles/os_zabbix/meta/main.yml | 9 - roles/os_zabbix/tasks/clean_zabbix.yml | 47 ---- roles/os_zabbix/tasks/create_template.yml | 56 ----- roles/os_zabbix/tasks/create_user.yml | 11 - roles/os_zabbix/tasks/main.yml | 30 --- roles/os_zabbix/vars/main.yml | 1 - roles/os_zabbix/vars/template_heartbeat.yml | 13 -- roles/os_zabbix/vars/template_host.yml | 27 --- roles/os_zabbix/vars/template_master.yml | 27 --- roles/os_zabbix/vars/template_node.yml | 27 --- roles/os_zabbix/vars/template_os_linux.yml | 143 ------------ roles/os_zabbix/vars/template_router.yml | 27 --- 57 files changed, 2416 insertions(+), 2764 deletions(-) create mode 100644 playbooks/adhoc/zabbix_setup/clean_zabbix.yml create mode 100644 playbooks/adhoc/zabbix_setup/create_template.yml create mode 100644 playbooks/adhoc/zabbix_setup/create_user.yml create mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins create mode 120000 playbooks/adhoc/zabbix_setup/roles create mode 100644 playbooks/adhoc/zabbix_setup/setup_zabbix.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_host.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_master.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_node.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml create mode 100644 playbooks/adhoc/zabbix_setup/vars/template_router.yml delete mode 100644 roles/lib_zabbix/README.md delete mode 100644 roles/lib_zabbix/library/__init__.py delete mode 100644 roles/lib_zabbix/library/test.yml delete mode 100644 roles/lib_zabbix/library/zbx_application.py delete mode 100644 roles/lib_zabbix/library/zbx_discoveryrule.py delete mode 100644 roles/lib_zabbix/library/zbx_host.py delete mode 100644 roles/lib_zabbix/library/zbx_hostgroup.py delete mode 100644 roles/lib_zabbix/library/zbx_item.py delete mode 100644 roles/lib_zabbix/library/zbx_itemprototype.py delete mode 100644 roles/lib_zabbix/library/zbx_mediatype.py delete mode 100644 roles/lib_zabbix/library/zbx_template.py delete mode 100644 roles/lib_zabbix/library/zbx_trigger.py delete mode 100644 roles/lib_zabbix/library/zbx_user.py delete mode 100644 roles/lib_zabbix/library/zbx_user_media.py delete mode 100644 roles/lib_zabbix/library/zbx_usergroup.py delete mode 100644 roles/os_zabbix/README.md delete mode 100644 roles/os_zabbix/defaults/main.yml delete mode 100644 roles/os_zabbix/handlers/main.yml create mode 100644 roles/os_zabbix/library/__init__.py create mode 100644 roles/os_zabbix/library/get_drule.yml create mode 100644 roles/os_zabbix/library/test.yml create mode 100644 roles/os_zabbix/library/zbx_application.py create mode 100644 roles/os_zabbix/library/zbx_discoveryrule.py create mode 100644 roles/os_zabbix/library/zbx_host.py create mode 100644 roles/os_zabbix/library/zbx_hostgroup.py create mode 100644 roles/os_zabbix/library/zbx_item.py create mode 100644 roles/os_zabbix/library/zbx_itemprototype.py create mode 100644 roles/os_zabbix/library/zbx_mediatype.py create mode 100644 roles/os_zabbix/library/zbx_template.py create mode 100644 
roles/os_zabbix/library/zbx_trigger.py create mode 100644 roles/os_zabbix/library/zbx_user.py create mode 100644 roles/os_zabbix/library/zbx_usergroup.py delete mode 100644 roles/os_zabbix/meta/main.yml delete mode 100644 roles/os_zabbix/tasks/clean_zabbix.yml delete mode 100644 roles/os_zabbix/tasks/create_template.yml delete mode 100644 roles/os_zabbix/tasks/create_user.yml delete mode 100644 roles/os_zabbix/tasks/main.yml delete mode 100644 roles/os_zabbix/vars/main.yml delete mode 100644 roles/os_zabbix/vars/template_heartbeat.yml delete mode 100644 roles/os_zabbix/vars/template_host.yml delete mode 100644 roles/os_zabbix/vars/template_master.yml delete mode 100644 roles/os_zabbix/vars/template_node.yml delete mode 100644 roles/os_zabbix/vars/template_os_linux.yml delete mode 100644 roles/os_zabbix/vars/template_router.yml (limited to 'playbooks') diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py index c44b874e8..a473993a2 100644 --- a/filter_plugins/oo_zabbix_filters.py +++ b/filter_plugins/oo_zabbix_filters.py @@ -59,17 +59,6 @@ class FilterModule(object): return data[zabbix_item]['params'] return None - @staticmethod - def oo_build_zabbix_collect(data, string, value): - ''' Build a list of dicts from a list of data matched on string attribute - ''' - rval = [] - for item in data: - if item[string] == value: - rval.append(item) - - return rval - @staticmethod def oo_build_zabbix_list_dict(values, string): ''' Build a list of dicts with string as key for each value @@ -79,22 +68,6 @@ class FilterModule(object): rval.append({string: value}) return rval - @staticmethod - def oo_remove_attr_from_list_dict(data, attr): - ''' Remove a specific attribute from a dict - ''' - attrs = [] - if isinstance(attr, str): - attrs.append(attr) - else: - attrs = attr - - for attribute in attrs: - for _entry in data: - _entry.pop(attribute, None) - - return data - def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -103,6 +76,4 @@ class FilterModule(object): "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid, "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, "create_data": self.create_data, - "oo_build_zabbix_collect": self.oo_build_zabbix_collect, - "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict, } diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml new file mode 100644 index 000000000..a31cbef65 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -0,0 +1,51 @@ +--- +- hosts: localhost + gather_facts: no + vars: + g_zserver: http://localhost/zabbix/api_jsonrpc.php + g_zuser: Admin + g_zpassword: zabbix + roles: + - ../../../roles/os_zabbix + post_tasks: + + - zbx_template: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + name: 'Template Heartbeat' + register: templ_heartbeat + + - zbx_template: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + name: 'Template App Zabbix Server' + register: templ_zabbix_server + + - zbx_template: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + name: 'Template App Zabbix Agent' + register: templ_zabbix_agent + + - zbx_template: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + register: templates + + - debug: var=templ_heartbeat.results + + - zbx_template: + server: "{{ g_zserver }}" + user: 
"{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: absent + with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" + when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml new file mode 100644 index 000000000..50fff53b2 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/create_template.yml @@ -0,0 +1,57 @@ +--- +- debug: var=ctp_template + +- name: Create Template + zbx_template: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + name: "{{ ctp_template.name }}" + register: ctp_created_template + +- debug: var=ctp_created_template + +#- name: Create Application +# zbxapi: +# server: "{{ ctp_zserver }}" +# user: "{{ ctp_zuser }}" +# password: "{{ ctp_zpassword }}" +# zbx_class: Application +# state: present +# params: +# name: "{{ ctp_template.application.name}}" +# hostid: "{{ ctp_created_template.results[0].templateid }}" +# search: +# name: "{{ ctp_template.application.name}}" +# register: ctp_created_application + +#- debug: var=ctp_created_application + +- name: Create Items + zbx_item: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + key: "{{ item.key }}" + name: "{{ item.name | default(item.key, true) }}" + value_type: "{{ item.value_type | default('int') }}" + template_name: "{{ ctp_template.name }}" + with_items: ctp_template.zitems + register: ctp_created_items + +#- debug: var=ctp_created_items + +- name: Create Triggers + zbx_trigger: + server: "{{ ctp_zserver }}" + user: "{{ ctp_zuser }}" + password: "{{ ctp_zpassword }}" + description: "{{ item.description }}" + expression: "{{ item.expression }}" + priority: "{{ item.priority }}" + with_items: ctp_template.ztriggers + when: ctp_template.ztriggers is defined + +#- debug: var=ctp_created_triggers + + diff --git a/playbooks/adhoc/zabbix_setup/create_user.yml b/playbooks/adhoc/zabbix_setup/create_user.yml new file mode 100644 index 000000000..dd74798b7 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/create_user.yml @@ -0,0 +1,31 @@ +--- +# export PYTHONPATH='/usr/lib/python2.7/site-packages/:/home/kwoodson/git/openshift-tools' +# ansible-playbook -e 'cli_password=zabbix' -e 'cli_new_password=new-zabbix' create_user.yml +- hosts: localhost + gather_facts: no + vars_files: + - vars/template_heartbeat.yml + - vars/template_os_linux.yml + vars: + g_zserver: http://localhost/zabbix/api_jsonrpc.php + g_zuser: admin + g_zpassword: "{{ cli_password }}" + roles: + - ../../../roles/os_zabbix + post_tasks: + - zbx_user: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + register: users + + - debug: var=users + + - name: Update zabbix creds for admin + zbx_user: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + alias: Admin + passwd: "{{ cli_new_password | default(g_zpassword, true) }}" diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/roles @@ -0,0 +1 @@ +../../../roles/ \ No 
newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml new file mode 100644 index 000000000..1729194b5 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml @@ -0,0 +1,38 @@ +--- +- hosts: localhost + gather_facts: no + vars_files: + - vars/template_heartbeat.yml + - vars/template_os_linux.yml + vars: + g_zserver: http://localhost/zabbix/api_jsonrpc.php + g_zuser: Admin + g_zpassword: zabbix + roles: + - ../../../roles/os_zabbix + post_tasks: + - zbx_template: + server: "{{ g_zserver }}" + user: "{{ g_zuser }}" + password: "{{ g_zpassword }}" + state: list + register: templates + + - debug: var=templates + + - name: Include Template + include: create_template.yml + vars: + ctp_template: "{{ g_template_heartbeat }}" + ctp_zserver: "{{ g_zserver }}" + ctp_zuser: "{{ g_zuser }}" + ctp_zpassword: "{{ g_zpassword }}" + + - name: Include Template + include: create_template.yml + vars: + ctp_template: "{{ g_template_os_linux }}" + ctp_zserver: "{{ g_zserver }}" + ctp_zuser: "{{ g_zuser }}" + ctp_zpassword: "{{ g_zpassword }}" + diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml new file mode 100644 index 000000000..22cc75554 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml @@ -0,0 +1,11 @@ +--- +g_template_heartbeat: + name: Template Heartbeat + zitems: + - name: Heartbeat Ping + hostid: + key: heartbeat.ping + ztriggers: + - description: 'Heartbeat.ping has failed on {HOST.NAME}' + expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' + priority: avg diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml new file mode 100644 index 000000000..e7cc667cb --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_host.yml @@ -0,0 +1,27 @@ +--- +g_template_host: + params: + name: Template Host + host: Template Host + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Host + zitems: + - name: Host Ping + hostid: + key_: host.ping + type: 2 + value_type: 0 + output: extend + search: + key_: host.ping + ztriggers: + - description: 'Host ping has failed on {HOST.NAME}' + expression: '{Template Host:host.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Host ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml new file mode 100644 index 000000000..5f9b41a4f --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_master.yml @@ -0,0 +1,27 @@ +--- +g_template_master: + params: + name: Template Master + host: Template Master + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Master + zitems: + - name: Master Etcd Ping + hostid: + key_: master.etcd.ping + type: 2 + value_type: 0 + output: extend + search: + key_: master.etcd.ping + ztriggers: + - description: 'Master Etcd ping has failed on {HOST.NAME}' + expression: '{Template Master:master.etcd.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Master Etcd ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml new file mode 100644 index 000000000..98c343a24 --- /dev/null +++ 
b/playbooks/adhoc/zabbix_setup/vars/template_node.yml @@ -0,0 +1,27 @@ +--- +g_template_node: + params: + name: Template Node + host: Template Node + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Node + zitems: + - name: Kubelet Ping + hostid: + key_: kubelet.ping + type: 2 + value_type: 0 + output: extend + search: + key_: kubelet.ping + ztriggers: + - description: 'Kubelet ping has failed on {HOST.NAME}' + expression: '{Template Node:kubelet.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Kubelet ping has failed on*' + expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml new file mode 100644 index 000000000..9cc038ffa --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml @@ -0,0 +1,90 @@ +--- +g_template_os_linux: + name: Template OS Linux + zitems: + - key: kernel.uname.sysname + value_type: string + + - key: kernel.all.cpu.wait.total + value_type: int + + - key: kernel.all.cpu.irq.hard + value_type: int + + - key: kernel.all.cpu.idle + value_type: int + + - key: kernel.uname.distro + value_type: string + + - key: kernel.uname.nodename + value_type: string + + - key: kernel.all.cpu.irq.soft + value_type: int + + - key: kernel.all.load.15_minute + value_type: float + + - key: kernel.all.cpu.sys + value_type: int + + - key: kernel.all.load.5_minute + value_type: float + + - key: mem.freemem + value_type: int + + - key: kernel.all.cpu.nice + value_type: int + + - key: mem.util.bufmem + value_type: int + + - key: swap.used + value_type: int + + - key: kernel.all.load.1_minute + value_type: float + + - key: kernel.uname.version + value_type: string + + - key: swap.length + value_type: int + + - key: mem.physmem + value_type: int + + - key: kernel.all.uptime + value_type: int + + - key: swap.free + value_type: int + + - key: mem.util.used + value_type: int + + - key: kernel.all.cpu.user + value_type: int + + - key: kernel.uname.machine + value_type: string + + - key: hinv.ncpu + value_type: int + + - key: mem.util.cached + value_type: int + + - key: kernel.all.cpu.steal + value_type: int + + - key: kernel.all.pswitch + value_type: int + + - key: kernel.uname.release + value_type: string + + - key: proc.nprocs + value_type: int diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml new file mode 100644 index 000000000..4dae7da1e --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/vars/template_router.yml @@ -0,0 +1,27 @@ +--- +g_template_router: + params: + name: Template Router + host: Template Router + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Router + zitems: + - name: Router Backends down + hostid: + key_: router.backends.down + type: 2 + value_type: 0 + output: extend + search: + key_: router.backends.down + ztriggers: + - description: 'Number of router backends down on {HOST.NAME}' + expression: '{Template Router:router.backends.down.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Number of router backends down on {HOST.NAME}' + expandExpression: True diff --git a/roles/lib_zabbix/README.md b/roles/lib_zabbix/README.md deleted file mode 100644 index 69debc698..000000000 --- a/roles/lib_zabbix/README.md +++ /dev/null @@ -1,38 +0,0 @@ -zabbix -========= - -Automate zabbix tasks. 
- -Requirements ------------- - -This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. - -Role Variables --------------- - -None - -Dependencies ------------- - -This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. - -Example Playbook ----------------- - - - zbx_host: - server: zab_server - user: zab_user - password: zab_password - name: 'myhost' - -License -------- - -ASL 2.0 - -Author Information ------------------- - -OpenShift operations, Red Hat, Inc diff --git a/roles/lib_zabbix/library/__init__.py b/roles/lib_zabbix/library/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/lib_zabbix/library/test.yml b/roles/lib_zabbix/library/test.yml deleted file mode 100644 index cedace1a0..000000000 --- a/roles/lib_zabbix/library/test.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -# This is a test playbook to create one of each of the zabbix ansible modules. -# ensure that the zbxapi module is installed -# ansible-playbook test.yml -- name: Test zabbix ansible module - hosts: localhost - gather_facts: no - vars: - zbx_server: http://localhost:8080/zabbix/api_jsonrpc.php - zbx_user: Admin - zbx_password: zabbix - - pre_tasks: - - name: Create a template - zbx_template: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'test template' - register: template_output - - - debug: var=template_output - - - name: Create a discoveryrule - zbx_discoveryrule: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: test discoverule - key: test_listener - template_name: test template - lifetime: 14 - register: discoveryrule - - - debug: var=discoveryrule - - - name: Create an itemprototype - zbx_itemprototype: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Test itemprototype on {#TEST_LISTENER}' - key: 'test[{#TEST_LISTENER}]' - template_name: test template - discoveryrule_name: test discoverule - register: itemproto - - - debug: var=itemproto - - - name: Create an application - zbx_application: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Test App' - template_name: "test template" - register: item_output - - - name: Create an item - zbx_item: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'test item' - key: 'kenny.item.1' - applications: - - 'Test App' - template_name: "test template" - register: item_output - - - debug: var=item_output - - - name: Create an trigger - zbx_trigger: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - expression: '{test template:kenny.item.1.last()}>2' - description: 'Kenny desc' - register: trigger_output - - - debug: var=trigger_output - - - name: Create a hostgroup - zbx_hostgroup: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'kenny hostgroup' - register: hostgroup_output - - - debug: var=hostgroup_output - - - name: Create a host - zbx_host: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'kenny host' - template_names: - - test template - hostgroup_names: - - kenny hostgroup - register: host_output - - - debug: var=host_output - - - name: Create a 
usergroup - zbx_usergroup: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: kenny usergroup - rights: - - 'kenny hostgroup': rw - register: usergroup_output - - - debug: var=usergroup_output - - - name: Create a user - zbx_user: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - alias: kenny user - passwd: zabbix - usergroups: - - kenny usergroup - register: user_output - - - debug: var=user_output diff --git a/roles/lib_zabbix/library/zbx_application.py b/roles/lib_zabbix/library/zbx_application.py deleted file mode 100644 index 01df1a98e..000000000 --- a/roles/lib_zabbix/library/zbx_application.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python -''' -Ansible module for application -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix application ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_template_ids(zapi, template_names): - ''' - get related templates - ''' - template_ids = [] - # Fetch templates by name - for template_name in template_names: - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - if content.has_key('result'): - template_ids.append(content['result'][0]['templateid']) - return template_ids - -def main(): - ''' Ansible module for application - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - template_name=dict(default=None, type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], - module.params['user'], - module.params['password'], - module.params['debug'])) - - #Set the instance and the application for the rest of the calls - zbx_class_name = 'application' - idname = 'applicationid' - aname = module.params['name'] - state = module.params['state'] - # get a applicationid, see if it exists - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': aname}, - 'selectHost': 'hostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - 
module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0], - 'name': aname, - } - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - elif zab_results[key] != str(value) and zab_results[key] != value: - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=content['result'], state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - - if content.has_key('error'): - module.exit_json(failed=True, changed=False, results=content['error'], state="present") - - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_discoveryrule.py b/roles/lib_zabbix/library/zbx_discoveryrule.py deleted file mode 100644 index 56b87fecc..000000000 --- a/roles/lib_zabbix/library/zbx_discoveryrule.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix discovery rule ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
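All of the zbx_* modules in this reverted tree follow the same state contract: state=list returns whatever the Zabbix API finds for the search, state=present creates the object or pushes only the computed differences, and state=absent deletes by the object's id. A minimal illustrative sketch of that flow using zbx_template, reusing the g_zserver/g_zuser/g_zpassword variables and the Template Heartbeat name from the adhoc playbooks above (the when condition is only an example, not taken from this repository):

    - zbx_template:
        server: "{{ g_zserver }}"
        user: "{{ g_zuser }}"
        password: "{{ g_zpassword }}"
        state: list
        name: 'Template Heartbeat'
      register: templ_heartbeat

    # Delete the template only when the list call actually found it
    - zbx_template:
        server: "{{ g_zserver }}"
        user: "{{ g_zuser }}"
        password: "{{ g_zpassword }}"
        state: absent
        name: 'Template Heartbeat'
      when: templ_heartbeat.results | length > 0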
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_template(zapi, template_name): - '''get a template by name - ''' - content = zapi.get_content('template', - 'get', - {'search': {'host': template_name}, - 'output': 'extend', - 'selectInterfaces': 'interfaceid', - }) - if not content['result']: - return None - return content['result'][0] - -def get_type(vtype): - ''' - Determine which type of discoverrule this is - ''' - _types = {'agent': 0, - 'SNMPv1': 1, - 'trapper': 2, - 'simple': 3, - 'SNMPv2': 4, - 'internal': 5, - 'SNMPv3': 6, - 'active': 7, - 'external': 10, - 'database monitor': 11, - 'ipmi': 12, - 'ssh': 13, - 'telnet': 14, - 'JMX': 16, - } - - for typ in _types.keys(): - if vtype in typ or vtype == typ: - _vtype = _types[typ] - break - else: - _vtype = 2 - - return _vtype - -def main(): - ''' - Ansible module for zabbix discovery rules - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), - delay=dict(default=60, type='int'), - lifetime=dict(default=30, type='int'), - template_name=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'discoveryrule' - idname = "itemid" - dname = module.params['name'] - state = module.params['state'] - - # selectInterfaces doesn't appear to be working but is needed. 
- content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': dname}, - #'selectDServices': 'extend', - #'selectDChecks': 'extend', - #'selectDhosts': 'dhostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - template = get_template(zapi, module.params['template_name']) - params = {'name': dname, - 'key_': module.params['key'], - 'hostid': template['templateid'], - 'interfaceid': module.params['interfaceid'], - 'lifetime': module.params['lifetime'], - 'type': get_type(module.params['ztype']), - } - if params['type'] in [2, 5, 7, 11]: - params.pop('interfaceid') - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py deleted file mode 100644 index 12c5f3456..000000000 --- a/roles/lib_zabbix/library/zbx_host.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix host ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_group_ids(zapi, hostgroup_names): - ''' - get hostgroups - ''' - # Fetch groups by name - group_ids = [] - for hgr in hostgroup_names: - content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) - if content.has_key('result'): - group_ids.append({'groupid': content['result'][0]['groupid']}) - - return group_ids - -def get_template_ids(zapi, template_names): - ''' - get related templates - ''' - template_ids = [] - # Fetch templates by name - for template_name in template_names: - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - if content.has_key('result'): - template_ids.append({'templateid': content['result'][0]['templateid']}) - return template_ids - -def main(): - ''' - Ansible module for zabbix host - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - hostgroup_names=dict(default=[], type='list'), - template_names=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - interfaces=dict(default=None, type='list'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'host' - idname = "hostid" - hname = module.params['name'] - state = module.params['state'] - - # selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': hname}, - 'selectGroups': 'groupid', - 'selectParentTemplates': 'templateid', - 'selectInterfaces': 'interfaceid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - ifs = module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent - 'main': 1, # default interface? 1 = true - 'useip': 1, # default interface? 1 = true - 'ip': '127.0.0.1', # default interface? 1 = true - 'dns': '', # dns for host - 'port': '10050', # port for interface? 
10050 - }] - params = {'host': hname, - 'groups': get_group_ids(zapi, module.params['hostgroup_names']), - 'templates': get_template_ids(zapi, module.params['template_names']), - 'interfaces': ifs, - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_hostgroup.py b/roles/lib_zabbix/library/zbx_hostgroup.py deleted file mode 100644 index a1eb875d4..000000000 --- a/roles/lib_zabbix/library/zbx_hostgroup.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python -''' Ansible module for hostgroup -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix hostgroup ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
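zbx_hostgroup, which follows, needs only a name; zbx_host above then references groups and templates by name and resolves them to ids through get_group_ids and get_template_ids. A sketch of the pairing, mirroring the deleted test playbook (the group, host and template names are the illustrative ones used there):

    - zbx_hostgroup:
        server: "{{ zbx_server }}"
        user: "{{ zbx_user }}"
        password: "{{ zbx_password }}"
        name: 'kenny hostgroup'

    - zbx_host:
        server: "{{ zbx_server }}"
        user: "{{ zbx_user }}"
        password: "{{ zbx_password }}"
        name: 'kenny host'
        hostgroup_names:
          - kenny hostgroup
        template_names:
          - test template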
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def main(): - ''' ansible module for hostgroup - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'hostgroup' - idname = "groupid" - hname = module.params['name'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': hname}, - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': hname} - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py deleted file mode 100644 index 64dbb976f..000000000 --- a/roles/lib_zabbix/library/zbx_item.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for zabbix items -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix item ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_value_type(value_type): - ''' - Possible values: - 0 - numeric float; - 1 - character; - 2 - log; - 3 - numeric unsigned; - 4 - text - ''' - vtype = 0 - if 'int' in value_type: - vtype = 3 - elif 'char' in value_type: - vtype = 1 - elif 'str' in value_type: - vtype = 4 - - return vtype - -def get_app_ids(zapi, application_names): - ''' get application ids from names - ''' - if isinstance(application_names, str): - application_names = [application_names] - app_ids = [] - for app_name in application_names: - content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) - if content.has_key('result'): - app_ids.append(content['result'][0]['applicationid']) - return app_ids - -def main(): - ''' - ansible zabbix module for zbx_item - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - template_name=dict(default=None, type='str'), - zabbix_type=dict(default=2, type='int'), - value_type=dict(default='int', type='str'), - applications=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'item' - idname = "itemid" - state = module.params['state'] - key = module.params['key'] - template_name = module.params['template_name'] - - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - templateid = None - if content['result']: - templateid = content['result'][0]['templateid'] - else: - module.exit_json(changed=False, - results='Error: Could find template with name %s for item.' 
% template_name, - state="Unkown") - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'key_': key}, - 'selectApplications': 'applicationid', - }) - - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': module.params.get('name', module.params['key']), - 'key_': key, - 'hostid': templateid, - 'type': module.params['zabbix_type'], - 'value_type': get_value_type(module.params['value_type']), - 'applications': get_app_ids(zapi, module.params['applications']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py deleted file mode 100644 index f0eb6bbbd..000000000 --- a/roles/lib_zabbix/library/zbx_itemprototype.py +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix discovery rule ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
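An item prototype only makes sense when attached to a discovery rule: zbx_itemprototype below looks the rule up by discoveryrule_name, and the prototype's key normally carries the low-level-discovery macro the rule emits. The pairing as exercised by the deleted test playbook, with its illustrative names:

    - zbx_discoveryrule:
        server: "{{ zbx_server }}"
        user: "{{ zbx_user }}"
        password: "{{ zbx_password }}"
        name: test discoverule
        key: test_listener
        template_name: test template
        lifetime: 14

    - zbx_itemprototype:
        server: "{{ zbx_server }}"
        user: "{{ zbx_user }}"
        password: "{{ zbx_password }}"
        name: 'Test itemprototype on {#TEST_LISTENER}'
        key: 'test[{#TEST_LISTENER}]'
        template_name: test template
        discoveryrule_name: test discoverule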
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_rule_id(zapi, discoveryrule_name): - '''get a discoveryrule by name - ''' - content = zapi.get_content('discoveryrule', - 'get', - {'search': {'name': discoveryrule_name}, - 'output': 'extend', - }) - if not content['result']: - return None - return content['result'][0]['itemid'] - -def get_template(zapi, template_name): - '''get a template by name - ''' - content = zapi.get_content('template', - 'get', - {'search': {'host': template_name}, - 'output': 'extend', - 'selectInterfaces': 'interfaceid', - }) - if not content['result']: - return None - return content['result'][0] - -def get_type(ztype): - ''' - Determine which type of discoverrule this is - ''' - _types = {'agent': 0, - 'SNMPv1': 1, - 'trapper': 2, - 'simple': 3, - 'SNMPv2': 4, - 'internal': 5, - 'SNMPv3': 6, - 'active': 7, - 'aggregate': 8, - 'external': 10, - 'database monitor': 11, - 'ipmi': 12, - 'ssh': 13, - 'telnet': 14, - 'calculated': 15, - 'JMX': 16, - } - - for typ in _types.keys(): - if ztype in typ or ztype == typ: - _vtype = _types[typ] - break - else: - _vtype = 2 - - return _vtype - -def get_value_type(value_type): - ''' - Possible values: - 0 - numeric float; - 1 - character; - 2 - log; - 3 - numeric unsigned; - 4 - text - ''' - vtype = 0 - if 'int' in value_type: - vtype = 3 - elif 'char' in value_type: - vtype = 1 - elif 'str' in value_type: - vtype = 4 - - return vtype - -def get_status(status): - ''' Determine status - ''' - _status = 0 - if status == 'disabled': - _status = 1 - elif status == 'unsupported': - _status = 3 - - return _status - -def get_app_ids(zapi, application_names): - ''' get application ids from names - ''' - app_ids = [] - for app_name in application_names: - content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) - if content.has_key('result'): - app_ids.append(content['result'][0]['applicationid']) - return app_ids - -def main(): - ''' - Ansible module for zabbix discovery rules - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), - value_type=dict(default='float', type='str'), - delay=dict(default=60, type='int'), - lifetime=dict(default=30, type='int'), - template_name=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - status=dict(default='enabled', type='str'), - discoveryrule_name=dict(default=None, type='str'), - applications=dict(default=[], type='list'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'itemprototype' - idname = "itemid" - dname = module.params['name'] - state = module.params['state'] - - # 
selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': dname}, - 'selectApplications': 'applicationid', - 'selectDiscoveryRule': 'itemid', - #'selectDhosts': 'dhostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - template = get_template(zapi, module.params['template_name']) - params = {'name': dname, - 'key_': module.params['key'], - 'hostid': template['templateid'], - 'interfaceid': module.params['interfaceid'], - 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']), - 'type': get_type(module.params['ztype']), - 'value_type': get_value_type(module.params['value_type']), - 'applications': get_app_ids(zapi, module.params['applications']), - } - if params['type'] in [2, 5, 7, 8, 11, 15]: - params.pop('interfaceid') - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if key == 'ruleid': - if value != zab_results['discoveryRule']['itemid']: - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_mediatype.py b/roles/lib_zabbix/library/zbx_mediatype.py deleted file mode 100644 index b8dcaf7aa..000000000 --- a/roles/lib_zabbix/library/zbx_mediatype.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for mediatype -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix mediatype ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
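zbx_mediatype, which follows, accepts the transport as a word (email, script, sms, jabber), maps it to the numeric type the Zabbix API expects, and drops any unset SMTP or script parameters before calling the API. An illustrative sketch of an e-mail media type; the description and SMTP values are placeholders, not anything defined in this repository:

    - zbx_mediatype:
        server: "{{ zbx_server }}"
        user: "{{ zbx_user }}"
        password: "{{ zbx_password }}"
        description: 'ops email'
        mtype: email
        smtp_server: smtp.example.com
        smtp_helo: example.com
        smtp_email: zabbix@example.com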
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_mtype(mtype): - ''' - Transport used by the media type. - Possible values: - 0 - email; - 1 - script; - 2 - SMS; - 3 - Jabber; - 100 - Ez Texting. - ''' - mtype = mtype.lower() - media_type = None - if mtype == 'script': - media_type = 1 - elif mtype == 'sms': - media_type = 2 - elif mtype == 'jabber': - media_type = 3 - elif mtype == 'script': - media_type = 100 - else: - media_type = 0 - - return media_type - -def main(): - ''' - Ansible zabbix module for mediatype - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - description=dict(default=None, type='str'), - mtype=dict(default=None, type='str'), - smtp_server=dict(default=None, type='str'), - smtp_helo=dict(default=None, type='str'), - smtp_email=dict(default=None, type='str'), - passwd=dict(default=None, type='str'), - path=dict(default=None, type='str'), - username=dict(default=None, type='str'), - status=dict(default='enabled', type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'mediatype' - idname = "mediatypeid" - description = module.params['description'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}}) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - status = 1 - if module.params['status']: - status = 0 - params = {'description': description, - 'type': get_mtype(module.params['mtype']), - 'smtp_server': module.params['smtp_server'], - 'smtp_helo': module.params['smtp_helo'], - 'smtp_email': module.params['smtp_email'], - 'passwd': module.params['passwd'], - 'exec_path': module.params['path'], - 'username': module.params['username'], - 'status': status, - } - - # Remove any None valued params - _ = [params.pop(key, None) for key in params.keys() if params[key] is None] - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - - if content.has_key('error'): - module.exit_json(failed=True, changed=False, results=content['error'], state="present") - - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if zab_results[key] != value and \ 
- zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_template.py b/roles/lib_zabbix/library/zbx_template.py deleted file mode 100644 index f86f22003..000000000 --- a/roles/lib_zabbix/library/zbx_template.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python -''' -Ansible module for template -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix template ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def main(): - ''' Ansible module for template - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - zbc = ZabbixConnection(module.params['server'], - module.params['user'], - module.params['password'], - module.params['debug']) - zapi = ZabbixAPI(zbc) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'template' - idname = 'templateid' - tname = module.params['name'] - state = module.params['state'] - # get a template, see if it exists - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': tname}, - 'selectParentTemplates': 'templateid', - 'selectGroups': 'groupid', - 'selectApplications': 'applicationid', - 'selectDiscoveries': 'extend', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - if not tname: - module.exit_json(failed=True, - changed=False, - results='Must specifiy a template name.', - state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'groups': module.params.get('groups', [{'groupid': '1'}]), - 'host': tname, - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - elif zab_results[key] != str(value) and zab_results[key] != value: - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=content['result'], state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. 
This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py deleted file mode 100644 index 6f5392437..000000000 --- a/roles/lib_zabbix/library/zbx_trigger.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python -''' -ansible module for zabbix triggers -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix trigger ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_priority(priority): - ''' determine priority - ''' - prior = 0 - if 'info' in priority: - prior = 1 - elif 'warn' in priority: - prior = 2 - elif 'avg' == priority or 'ave' in priority: - prior = 3 - elif 'high' in priority: - prior = 4 - elif 'dis' in priority: - prior = 5 - - return prior - -def get_deps(zapi, deps): - ''' get trigger dependencies - ''' - results = [] - for desc in deps: - content = zapi.get_content('trigger', - 'get', - {'search': {'description': desc}, - 'expandExpression': True, - 'selectDependencies': 'triggerid', - }) - if content.has_key('result'): - results.append({'triggerid': content['result'][0]['triggerid']}) - - return results - -def main(): - ''' - Create a trigger in zabbix - - Example: - "params": { - "description": "Processor load is too high on {HOST.NAME}", - "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", - "dependencies": [ - { - "triggerid": "14062" - } - ] - }, - - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - expression=dict(default=None, type='str'), - description=dict(default=None, type='str'), - dependencies=dict(default=[], type='list'), - priority=dict(default='avg', type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'trigger' - idname = "triggerid" - state = module.params['state'] - description = module.params['description'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': 
{'description': description}, - 'expandExpression': True, - 'selectDependencies': 'triggerid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'description': description, - 'expression': module.params['expression'], - 'dependencies': get_deps(zapi, module.params['dependencies']), - 'priority': get_priority(module.params['priority']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_user.py b/roles/lib_zabbix/library/zbx_user.py deleted file mode 100644 index 220caa6f1..000000000 --- a/roles/lib_zabbix/library/zbx_user.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python -''' -ansible module for zabbix users -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix user ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_usergroups(zapi, usergroups): - ''' Get usergroups - ''' - ugroups = [] - for ugr in usergroups: - content = zapi.get_content('usergroup', - 'get', - {'search': {'name': ugr}, - #'selectUsers': 'userid', - #'getRights': 'extend' - }) - if content['result']: - ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) - - return ugroups or None - -def get_usertype(user_type): - ''' - Determine zabbix user account type - ''' - if not user_type: - return None - - utype = 1 - if 'super' in user_type: - utype = 3 - elif 'admin' in user_type or user_type == 'admin': - utype = 2 - - return utype - -def main(): - ''' - ansible zabbix module for users - ''' - - ##def user(self, name, state='present', params=None): - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - alias=dict(default=None, type='str'), - name=dict(default=None, type='str'), - surname=dict(default=None, type='str'), - user_type=dict(default=None, type='str'), - passwd=dict(default=None, type='str'), - user_groups=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - password = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, password, module.params['debug'])) - - ## before we can create a user media and users with media types we need media - zbx_class_name = 'user' - idname = "userid" - alias = module.params['alias'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'output': 'extend', - 'search': {'alias': alias}, - "selectUsrgrps": 'usergrpid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content) or len(content['result']) == 0: - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'alias': alias, - 'passwd': module.params['passwd'], - 'usrgrps': get_usergroups(zapi, module.params['user_groups']), - 'name': module.params['name'], - 'surname': module.params['surname'], - 'type': get_usertype(module.params['user_type']), - } - - # Remove any None valued params - _ = [params.pop(key, None) for key in params.keys() if params[key] is None] - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'passwd': - differences[key] = value - - elif key == 'usrgrps': - # this must be done as a list of ordered dictionaries fails 
comparison - if not all([True for _ in zab_results[key][0] if _ in value[0]]): - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_user_media.py b/roles/lib_zabbix/library/zbx_user_media.py deleted file mode 100644 index f590c58fe..000000000 --- a/roles/lib_zabbix/library/zbx_user_media.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for user media -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix user media ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_mtype(zapi, mtype): - '''Get mediatype - - If passed an int, return it as the mediatypeid - if its a string, then try to fetch through a description - ''' - if isinstance(mtype, int): - return mtype - try: - return int(mtype) - except ValueError: - pass - - content = zapi.get_content('mediatype', 'get', {'search': {'description': mtype}}) - if content.has_key['result'] and content['result']: - return content['result'][0]['mediatypeid'] - - return None - -def get_user(zapi, user): - ''' Get userids from user aliases - ''' - content = zapi.get_content('user', 'get', {'search': {'alias': user}}) - if content['result']: - return content['result'][0] - - return None - -def get_severity(severity): - ''' determine severity - ''' - if isinstance(severity, int) or \ - isinstance(severity, str): - return severity - - val = 0 - sev_map = { - 'not': 2**0, - 'inf': 2**1, - 'war': 2**2, - 'ave': 2**3, - 'avg': 2**3, - 'hig': 2**4, - 'dis': 2**5, - } - for level in severity: - val |= sev_map[level[:3].lower()] - return val - -def get_zbx_user_query_data(zapi, user_name): - ''' If name exists, retrieve it, and build query params. 
- ''' - query = {} - if user_name: - zbx_user = get_user(zapi, user_name) - query = {'userids': zbx_user['userid']} - - return query - -def find_media(medias, user_media): - ''' Find the user media in the list of medias - ''' - for media in medias: - if all([media[key] == user_media[key] for key in user_media.keys()]): - return media - return None - -def get_active(in_active): - '''Determine active value - ''' - active = 1 - if in_active: - active = 0 - - return active - -def main(): - ''' - Ansible zabbix module for mediatype - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - active=dict(default=False, type='bool'), - medias=dict(default=None, type='list'), - mediaid=dict(default=None, type='int'), - mediatype=dict(default=None, type='str'), - mediatype_desc=dict(default=None, type='str'), - #d-d,hh:mm-hh:mm;d-d,hh:mm-hh:mm... - period=dict(default=None, type='str'), - sendto=dict(default=None, type='str'), - severity=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'user' - idname = "mediaid" - state = module.params['state'] - - # User media is fetched through the usermedia.get - zbx_user_query = get_zbx_user_query_data(zapi, module.params['name']) - content = zapi.get_content('usermedia', 'get', zbx_user_query) - - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content) or len(content['result']) == 0: - module.exit_json(changed=False, state="absent") - - # TODO: Do we remove all the queried results? This could be catastrophic or desired. 
- #ids = [med[idname] for med in content['result']] - ids = [content['result'][0][idname]] - content = zapi.get_content(zbx_class_name, 'deletemedia', ids) - - if content.has_key('error'): - module.exit_json(changed=False, results=content['error'], state="absent") - - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - active = get_active(module.params['active']) - mtypeid = None - if module.params['mediatype']: - mtypeid = get_mtype(zapi, module.params['mediatype']) - elif module.params['mediatype_desc']: - mtypeid = get_mtype(zapi, module.params['mediatype_desc']) - - medias = module.params['medias'] - if medias == None: - medias = [{'mediatypeid': mtypeid, - 'sendto': module.params['sendto'], - 'active': active, - 'severity': int(get_severity(module.params['severity'])), - 'period': module.params['period'], - }] - - params = {'users': [zbx_user_query], - 'medias': medias, - 'output': 'extend', - } - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'addmedia', params) - - if content.has_key('error'): - module.exit_json(failed=True, changed=False, results=content['error'], state="present") - - module.exit_json(changed=True, results=content['result'], state='present') - - # mediaid signifies an update - # If user params exists, check to see if they already exist in zabbix - # if they exist, then return as no update - # elif they do not exist, then take user params only - differences = {'medias': [], 'users': {}} - for media in params['medias']: - m_result = find_media(content['result'], media) - if not m_result: - differences['medias'].append(media) - - if not differences['medias']: - module.exit_json(changed=False, results=content['result'], state="present") - - for user in params['users']: - differences['users']['userid'] = user['userids'] - - # We have differences and need to update - content = zapi.get_content(zbx_class_name, 'updatemedia', differences) - - if content.has_key('error'): - module.exit_json(failed=True, changed=False, results=content['error'], state="present") - - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/lib_zabbix/library/zbx_usergroup.py b/roles/lib_zabbix/library/zbx_usergroup.py deleted file mode 100644 index 11aef106c..000000000 --- a/roles/lib_zabbix/library/zbx_usergroup.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env python -''' -zabbix ansible module for usergroups -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix usergroup ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_rights(zapi, rights): - '''Get rights - ''' - if rights == None: - return None - - perms = [] - for right in rights: - hstgrp = right.keys()[0] - perm = right.values()[0] - content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) - if content['result']: - permission = 0 - if perm == 'ro': - permission = 2 - elif perm == 'rw': - permission = 3 - perms.append({'id': content['result'][0]['groupid'], - 'permission': permission}) - return perms - -def get_gui_access(access): - ''' Return the gui_access for a usergroup - ''' - access = access.lower() - if access == 'internal': - return 1 - elif access == 'disabled': - return 2 - - return 0 - -def get_debug_mode(mode): - ''' Return the debug_mode for a usergroup - ''' - mode = mode.lower() - if mode == 'enabled': - return 1 - - return 0 - -def get_user_status(status): - ''' Return the user_status for a usergroup - ''' - status = status.lower() - if status == 'enabled': - return 0 - - return 1 - - -#def get_userids(zapi, users): -# ''' Get userids from user aliases -# ''' -# if not users: -# return None -# -# userids = [] -# for alias in users: -# content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) -# if content['result']: -# userids.append(content['result'][0]['userid']) -# -# return userids - -def main(): - ''' Ansible module for usergroup - ''' - - ##def usergroup(self, name, rights=None, users=None, state='present', params=None): - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), - password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), - debug_mode=dict(default='disabled', type='str'), - gui_access=dict(default='default', type='str'), - status=dict(default='enabled', type='str'), - name=dict(default=None, type='str', required=True), - rights=dict(default=None, type='list'), - #users=dict(default=None, type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], - module.params['user'], - module.params['password'], - module.params['debug'])) - - zbx_class_name = 'usergroup' - idname = "usrgrpid" - uname = module.params['name'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': uname}, - 'selectUsers': 'userid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - if not uname: - module.exit_json(failed=True, changed=False, results='Need to pass in a user.', state="error") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 
'present': - - params = {'name': uname, - 'rights': get_rights(zapi, module.params['rights']), - 'users_status': get_user_status(module.params['status']), - 'gui_access': get_gui_access(module.params['gui_access']), - 'debug_mode': get_debug_mode(module.params['debug_mode']), - #'userids': get_userids(zapi, module.params['users']), - } - - _ = [params.pop(key, None) for key in params.keys() if params[key] == None] - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'rights': - differences['rights'] = value - - #elif key == 'userids' and zab_results.has_key('users'): - #if zab_results['users'] != value: - #differences['userids'] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/README.md b/roles/os_zabbix/README.md deleted file mode 100644 index ac3dc2833..000000000 --- a/roles/os_zabbix/README.md +++ /dev/null @@ -1,40 +0,0 @@ -os_zabbix -========= - -Automate zabbix tasks. - -Requirements ------------- - -This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. - -Role Variables --------------- - -zab_server -zab_username -zab_password - -Dependencies ------------- - -This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. - -Example Playbook ----------------- - - - zbx_host: - server: zab_server - user: zab_user - password: zab_password - name: 'myhost' - -License -------- - -ASL 2.0 - -Author Information ------------------- - -OpenShift operations, Red Hat, Inc diff --git a/roles/os_zabbix/defaults/main.yml b/roles/os_zabbix/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/os_zabbix/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/os_zabbix/handlers/main.yml b/roles/os_zabbix/handlers/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/os_zabbix/handlers/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/os_zabbix/library/__init__.py b/roles/os_zabbix/library/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/roles/os_zabbix/library/get_drule.yml b/roles/os_zabbix/library/get_drule.yml new file mode 100644 index 000000000..a3e39f535 --- /dev/null +++ b/roles/os_zabbix/library/get_drule.yml @@ -0,0 +1,115 @@ +--- +# This is a test playbook to create one of each of the zabbix ansible modules. 
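+# The zbx_server / zbx_user / zbx_password vars below are left commented out, so pass
+# them in at runtime; for example (illustrative values matching the commented defaults):
+#   ansible-playbook get_drule.yml -e zbx_server=https://localhost/zabbix/api_jsonrpc.php -e zbx_user=Admin -e zbx_password=zabbix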
+# ensure that the zbxapi module is installed +# ansible-playbook test.yml +- name: Test zabbix ansible module + hosts: localhost + gather_facts: no + vars: +#zbx_server: https://localhost/zabbix/api_jsonrpc.php +#zbx_user: Admin +#zbx_password: zabbix + + pre_tasks: + - name: Template Discovery rules + zbx_template: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'Template App HaProxy' + state: list + register: template_output + + - debug: var=template_output + + - name: Discovery rules + zbx_discovery_rule: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'haproxy.discovery sender' + state: list + register: drule + + - debug: var=drule + +# - name: Create an application +# zbx_application: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# name: 'Test App' +# template_name: "test template" +# register: item_output +# +# - name: Create an item +# zbx_item: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# name: 'test item' +# key: 'kenny.item.1' +# applications: +# - 'Test App' +# template_name: "test template" +# register: item_output +# +# - debug: var=item_output +# +# - name: Create an trigger +# zbx_trigger: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# expression: '{test template:kenny.item.1.last()}>2' +# description: 'Kenny desc' +# register: trigger_output +# +# - debug: var=trigger_output +# +# - name: Create a hostgroup +# zbx_hostgroup: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# name: 'kenny hostgroup' +# register: hostgroup_output +# +# - debug: var=hostgroup_output +# +# - name: Create a host +# zbx_host: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# name: 'kenny host' +# template_names: +# - test template +# hostgroup_names: +# - kenny hostgroup +# register: host_output +# +# - debug: var=host_output +# +# - name: Create a usergroup +# zbx_usergroup: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# name: kenny usergroup +# rights: +# - 'kenny hostgroup': rw +# register: usergroup_output +# +# - debug: var=usergroup_output +# +# - name: Create a user +# zbx_user: +# server: "{{ zbx_server }}" +# user: "{{ zbx_user }}" +# password: "{{ zbx_password }}" +# alias: kwoodson +# state: list +# register: user_output +# +# - debug: var=user_output diff --git a/roles/os_zabbix/library/test.yml b/roles/os_zabbix/library/test.yml new file mode 100644 index 000000000..cedace1a0 --- /dev/null +++ b/roles/os_zabbix/library/test.yml @@ -0,0 +1,131 @@ +--- +# This is a test playbook to create one of each of the zabbix ansible modules. 
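+# It expects a reachable Zabbix API at the zbx_server URL defined in vars below and, in
+# order, creates a template, discovery rule, item prototype, application, item, trigger,
+# hostgroup, host, usergroup, and user, all using the sample names from each task.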
+# ensure that the zbxapi module is installed +# ansible-playbook test.yml +- name: Test zabbix ansible module + hosts: localhost + gather_facts: no + vars: + zbx_server: http://localhost:8080/zabbix/api_jsonrpc.php + zbx_user: Admin + zbx_password: zabbix + + pre_tasks: + - name: Create a template + zbx_template: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'test template' + register: template_output + + - debug: var=template_output + + - name: Create a discoveryrule + zbx_discoveryrule: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: test discoverule + key: test_listener + template_name: test template + lifetime: 14 + register: discoveryrule + + - debug: var=discoveryrule + + - name: Create an itemprototype + zbx_itemprototype: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'Test itemprototype on {#TEST_LISTENER}' + key: 'test[{#TEST_LISTENER}]' + template_name: test template + discoveryrule_name: test discoverule + register: itemproto + + - debug: var=itemproto + + - name: Create an application + zbx_application: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'Test App' + template_name: "test template" + register: item_output + + - name: Create an item + zbx_item: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'test item' + key: 'kenny.item.1' + applications: + - 'Test App' + template_name: "test template" + register: item_output + + - debug: var=item_output + + - name: Create an trigger + zbx_trigger: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + expression: '{test template:kenny.item.1.last()}>2' + description: 'Kenny desc' + register: trigger_output + + - debug: var=trigger_output + + - name: Create a hostgroup + zbx_hostgroup: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'kenny hostgroup' + register: hostgroup_output + + - debug: var=hostgroup_output + + - name: Create a host + zbx_host: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: 'kenny host' + template_names: + - test template + hostgroup_names: + - kenny hostgroup + register: host_output + + - debug: var=host_output + + - name: Create a usergroup + zbx_usergroup: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + name: kenny usergroup + rights: + - 'kenny hostgroup': rw + register: usergroup_output + + - debug: var=usergroup_output + + - name: Create a user + zbx_user: + server: "{{ zbx_server }}" + user: "{{ zbx_user }}" + password: "{{ zbx_password }}" + alias: kenny user + passwd: zabbix + usergroups: + - kenny usergroup + register: user_output + + - debug: var=user_output diff --git a/roles/os_zabbix/library/zbx_application.py b/roles/os_zabbix/library/zbx_application.py new file mode 100644 index 000000000..5d4acf72d --- /dev/null +++ b/roles/os_zabbix/library/zbx_application.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +''' +Ansible module for application +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix application ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append(content['result'][0]['templateid']) + return template_ids + +def main(): + ''' Ansible module for application + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + template_name=dict(default=None, type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the application for the rest of the calls + zbx_class_name = 'application' + idname = 'applicationid' + aname = module.params['name'] + state = module.params['state'] + # get a applicationid, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': aname}, + 'selectHost': 'hostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0], + 'name': aname, + } + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, 
results=content['result'], state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_discoveryrule.py b/roles/os_zabbix/library/zbx_discoveryrule.py new file mode 100644 index 000000000..56b87fecc --- /dev/null +++ b/roles/os_zabbix/library/zbx_discoveryrule.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +''' +Zabbix discovery rule ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_template(zapi, template_name): + '''get a template by name + ''' + content = zapi.get_content('template', + 'get', + {'search': {'host': template_name}, + 'output': 'extend', + 'selectInterfaces': 'interfaceid', + }) + if not content['result']: + return None + return content['result'][0] + +def get_type(vtype): + ''' + Determine which type of discoverrule this is + ''' + _types = {'agent': 0, + 'SNMPv1': 1, + 'trapper': 2, + 'simple': 3, + 'SNMPv2': 4, + 'internal': 5, + 'SNMPv3': 6, + 'active': 7, + 'external': 10, + 'database monitor': 11, + 'ipmi': 12, + 'ssh': 13, + 'telnet': 14, + 'JMX': 16, + } + + for typ in _types.keys(): + if vtype in typ or vtype == typ: + _vtype = _types[typ] + break + else: + _vtype = 2 + + return _vtype + +def main(): + ''' + Ansible module for zabbix discovery rules + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + interfaceid=dict(default=None, type='int'), + ztype=dict(default='trapper', type='str'), + delay=dict(default=60, type='int'), + lifetime=dict(default=30, type='int'), + template_name=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + 
#supports_check_mode=True + ) + + user = module.params['user'] + passwd = module.params['password'] + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'discoveryrule' + idname = "itemid" + dname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': dname}, + #'selectDServices': 'extend', + #'selectDChecks': 'extend', + #'selectDhosts': 'dhostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + template = get_template(zapi, module.params['template_name']) + params = {'name': dname, + 'key_': module.params['key'], + 'hostid': template['templateid'], + 'interfaceid': module.params['interfaceid'], + 'lifetime': module.params['lifetime'], + 'type': get_type(module.params['ztype']), + } + if params['type'] in [2, 5, 7, 11]: + params.pop('interfaceid') + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_host.py b/roles/os_zabbix/library/zbx_host.py new file mode 100644 index 000000000..12c5f3456 --- /dev/null +++ b/roles/os_zabbix/library/zbx_host.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +''' +Zabbix host ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. 
+# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_group_ids(zapi, hostgroup_names): + ''' + get hostgroups + ''' + # Fetch groups by name + group_ids = [] + for hgr in hostgroup_names: + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) + if content.has_key('result'): + group_ids.append({'groupid': content['result'][0]['groupid']}) + + return group_ids + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append({'templateid': content['result'][0]['templateid']}) + return template_ids + +def main(): + ''' + Ansible module for zabbix host + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + hostgroup_names=dict(default=[], type='list'), + template_names=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + interfaces=dict(default=None, type='list'), + ), + #supports_check_mode=True + ) + + user = module.params['user'] + passwd = module.params['password'] + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'host' + idname = "hostid" + hname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': hname}, + 'selectGroups': 'groupid', + 'selectParentTemplates': 'templateid', + 'selectInterfaces': 'interfaceid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + ifs = module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent + 'main': 1, # default interface? 1 = true + 'useip': 1, # default interface? 1 = true + 'ip': '127.0.0.1', # default interface? 1 = true + 'dns': '', # dns for host + 'port': '10050', # port for interface? 
10050 + }] + params = {'host': hname, + 'groups': get_group_ids(zapi, module.params['hostgroup_names']), + 'templates': get_template_ids(zapi, module.params['template_names']), + 'interfaces': ifs, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_hostgroup.py b/roles/os_zabbix/library/zbx_hostgroup.py new file mode 100644 index 000000000..a1eb875d4 --- /dev/null +++ b/roles/os_zabbix/library/zbx_hostgroup.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +''' Ansible module for hostgroup +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix hostgroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
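+#
+# Note: like the other zbx_* modules here, this module looks the hostgroup up by name and
+# then lists, deletes, creates, or updates only the changed fields, so repeated runs with
+# the same parameters should be idempotent.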
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' ansible module for hostgroup + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'hostgroup' + idname = "groupid" + hname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': hname}, + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': hname} + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py new file mode 100644 index 000000000..45ba6c2b0 --- /dev/null +++ b/roles/os_zabbix/library/zbx_item.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python +''' + Ansible module for zabbix items +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix item ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def get_app_ids(zapi, application_names): + ''' get application ids from names + ''' + app_ids = [] + for app_name in application_names: + content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) + if content.has_key('result'): + app_ids.append(content['result'][0]['applicationid']) + return app_ids + +def main(): + ''' + ansible zabbix module for zbx_item + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + template_name=dict(default=None, type='str'), + zabbix_type=dict(default=2, type='int'), + value_type=dict(default='int', type='str'), + applications=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'item' + idname = "itemid" + state = module.params['state'] + key = module.params['key'] + template_name = module.params['template_name'] + + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + templateid = None + if content['result']: + templateid = content['result'][0]['templateid'] + else: + module.exit_json(changed=False, + results='Error: Could find template with name %s for item.' 
% template_name, + state="Unkown") + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'key_': key}, + 'selectApplications': 'applicationid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': module.params.get('name', module.params['key']), + 'key_': key, + 'hostid': templateid, + 'type': module.params['zabbix_type'], + 'value_type': get_value_type(module.params['value_type']), + 'applications': get_app_ids(zapi, module.params['applications']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_itemprototype.py b/roles/os_zabbix/library/zbx_itemprototype.py new file mode 100644 index 000000000..f0eb6bbbd --- /dev/null +++ b/roles/os_zabbix/library/zbx_itemprototype.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python +''' +Zabbix discovery rule ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
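# Editorial sketch, not part of the patch: get_type() below leans on Python's
# for/else -- the else branch only runs when the loop never breaks, which is
# how an unrecognised type name falls back to 2 (trapper). The mapping is
# trimmed to a few entries here for brevity.
def lookup_type(ztype):
    '''Map a type name to its Zabbix numeric code, defaulting to trapper.'''
    _types = {'agent': 0, 'trapper': 2, 'simple': 3, 'internal': 5}
    for name in _types:
        if ztype in name or ztype == name:
            code = _types[name]
            break
    else:
        code = 2    # no break hit: default to trapper
    return code

print(lookup_type('agent'))    # -> 0
print(lookup_type('bogus'))    # -> 2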
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rule_id(zapi, discoveryrule_name): + '''get a discoveryrule by name + ''' + content = zapi.get_content('discoveryrule', + 'get', + {'search': {'name': discoveryrule_name}, + 'output': 'extend', + }) + if not content['result']: + return None + return content['result'][0]['itemid'] + +def get_template(zapi, template_name): + '''get a template by name + ''' + content = zapi.get_content('template', + 'get', + {'search': {'host': template_name}, + 'output': 'extend', + 'selectInterfaces': 'interfaceid', + }) + if not content['result']: + return None + return content['result'][0] + +def get_type(ztype): + ''' + Determine which type of discoverrule this is + ''' + _types = {'agent': 0, + 'SNMPv1': 1, + 'trapper': 2, + 'simple': 3, + 'SNMPv2': 4, + 'internal': 5, + 'SNMPv3': 6, + 'active': 7, + 'aggregate': 8, + 'external': 10, + 'database monitor': 11, + 'ipmi': 12, + 'ssh': 13, + 'telnet': 14, + 'calculated': 15, + 'JMX': 16, + } + + for typ in _types.keys(): + if ztype in typ or ztype == typ: + _vtype = _types[typ] + break + else: + _vtype = 2 + + return _vtype + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def get_status(status): + ''' Determine status + ''' + _status = 0 + if status == 'disabled': + _status = 1 + elif status == 'unsupported': + _status = 3 + + return _status + +def get_app_ids(zapi, application_names): + ''' get application ids from names + ''' + app_ids = [] + for app_name in application_names: + content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) + if content.has_key('result'): + app_ids.append(content['result'][0]['applicationid']) + return app_ids + +def main(): + ''' + Ansible module for zabbix discovery rules + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=os.environ['ZABBIX_USER'], type='str'), + password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + interfaceid=dict(default=None, type='int'), + ztype=dict(default='trapper', type='str'), + value_type=dict(default='float', type='str'), + delay=dict(default=60, type='int'), + lifetime=dict(default=30, type='int'), + template_name=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + status=dict(default='enabled', type='str'), + discoveryrule_name=dict(default=None, type='str'), + applications=dict(default=[], type='list'), + ), + #supports_check_mode=True + ) + + user = module.params['user'] + passwd = module.params['password'] + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'itemprototype' + idname = "itemid" + dname = module.params['name'] + state = module.params['state'] + + # 
selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': dname}, + 'selectApplications': 'applicationid', + 'selectDiscoveryRule': 'itemid', + #'selectDhosts': 'dhostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + template = get_template(zapi, module.params['template_name']) + params = {'name': dname, + 'key_': module.params['key'], + 'hostid': template['templateid'], + 'interfaceid': module.params['interfaceid'], + 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']), + 'type': get_type(module.params['ztype']), + 'value_type': get_value_type(module.params['value_type']), + 'applications': get_app_ids(zapi, module.params['applications']), + } + if params['type'] in [2, 5, 7, 8, 11, 15]: + params.pop('interfaceid') + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'ruleid': + if value != zab_results['discoveryRule']['itemid']: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_mediatype.py b/roles/os_zabbix/library/zbx_mediatype.py new file mode 100644 index 000000000..a49aecd0f --- /dev/null +++ b/roles/os_zabbix/library/zbx_mediatype.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +''' + Ansible module for mediatype +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix mediatype ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
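# Editorial sketch, not part of the patch: the media-type codes listed in the
# get_mtype() docstring below (0 email, 1 script, 2 SMS, 3 Jabber, 100 Ez
# Texting) can also be expressed as a plain dict lookup with email as the
# fallback. The 'ez texting' key spelling is an assumption.
MEDIA_TYPES = {'email': 0, 'script': 1, 'sms': 2, 'jabber': 3, 'ez texting': 100}

def lookup_media_type(name):
    '''Map a media-type name to its Zabbix numeric code, defaulting to email.'''
    return MEDIA_TYPES.get(name.lower(), 0)

print(lookup_media_type('SMS'))      # -> 2
print(lookup_media_type('pager'))    # -> 0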
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+    ''' Check if key exists in content or the size of content[key] > 0
+    '''
+    if not content.has_key(key):
+        return False
+
+    if not content[key]:
+        return False
+
+    return True
+def get_mtype(mtype):
+    '''
+    Transport used by the media type.
+    Possible values:
+    0 - email;
+    1 - script;
+    2 - SMS;
+    3 - Jabber;
+    100 - Ez Texting.
+    '''
+    mtype = mtype.lower()
+    media_type = None
+    if mtype == 'script':
+        media_type = 1
+    elif mtype == 'sms':
+        media_type = 2
+    elif mtype == 'jabber':
+        media_type = 3
+    elif mtype == 'ez texting':
+        media_type = 100
+    else:
+        media_type = 0
+
+    return media_type
+
+def main():
+    '''
+    Ansible zabbix module for mediatype
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            user=dict(default=None, type='str'),
+            password=dict(default=None, type='str'),
+            description=dict(default=None, type='str'),
+            mtype=dict(default=None, type='str'),
+            smtp_server=dict(default=None, type='str'),
+            smtp_helo=dict(default=None, type='str'),
+            smtp_email=dict(default=None, type='str'),
+            debug=dict(default=False, type='bool'),
+            state=dict(default='present', type='str'),
+        ),
+        #supports_check_mode=True
+    )
+
+    user = module.params.get('user', os.environ['ZABBIX_USER'])
+    passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD'])
+
+    zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+    #Set the instance and the template for the rest of the calls
+    zbx_class_name = 'mediatype'
+    idname = "mediatypeid"
+    description = module.params['description']
+    state = module.params['state']
+
+    content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}})
+    if state == 'list':
+        module.exit_json(changed=False, results=content['result'], state="list")
+
+    if state == 'absent':
+        if not exists(content):
+            module.exit_json(changed=False, state="absent")
+
+        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+        module.exit_json(changed=True, results=content['result'], state="absent")
+
+    if state == 'present':
+        params = {'description': description,
+                  'type': get_mtype(module.params['mtype']),
+                  'smtp_server': module.params['smtp_server'],
+                  'smtp_helo': module.params['smtp_helo'],
+                  'smtp_email': module.params['smtp_email'],
+                 }
+
+        if not exists(content):
+            # if we didn't find it, create it
+            content = zapi.get_content(zbx_class_name, 'create', params)
+            module.exit_json(changed=True, results=content['result'], state='present')
+        # already exists, we need to update it
+        # let's compare properties
+        differences = {}
+        zab_results = content['result'][0]
+        for key, value in params.items():
+            if zab_results[key] != value and \
+               zab_results[key] != str(value):
+                differences[key] = value
+
+        if not differences:
+            module.exit_json(changed=False, results=zab_results, state="present")
+
+        # We have differences and need to update
+        differences[idname] = zab_results[idname]
+        content = zapi.get_content(zbx_class_name, 'update', differences)
+        module.exit_json(changed=True, results=content['result'], state="present")
+
+    module.exit_json(failed=True,
+                     changed=False,
+                     results='Unknown state passed. 
%s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_template.py b/roles/os_zabbix/library/zbx_template.py new file mode 100644 index 000000000..20ea48a85 --- /dev/null +++ b/roles/os_zabbix/library/zbx_template.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +''' +Ansible module for template +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix template ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' Ansible module for template + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zbc = ZabbixConnection(module.params['server'], user, passwd, module.params['debug']) + zapi = ZabbixAPI(zbc) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'template' + idname = 'templateid' + tname = module.params['name'] + state = module.params['state'] + # get a template, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': tname}, + 'selectParentTemplates': 'templateid', + 'selectGroups': 'groupid', + 'selectApplications': 'applicationid', + 'selectDiscoveries': 'extend', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'groups': module.params.get('groups', [{'groupid': '1'}]), + 'host': tname, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, 
we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=content['result'], state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_trigger.py b/roles/os_zabbix/library/zbx_trigger.py new file mode 100644 index 000000000..7cc9356c8 --- /dev/null +++ b/roles/os_zabbix/library/zbx_trigger.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +''' +ansible module for zabbix triggers +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix trigger ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
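# Editorial sketch, not part of the patch: the severity codes that
# get_priority() below derives from name fragments ('info' 1, 'warn' 2,
# 'avg'/'ave' 3, 'high' 4, 'dis'aster 5), written here as a table-driven
# equivalent; 0 means "not classified".
SEVERITIES = [('info', 1), ('warn', 2), ('avg', 3), ('ave', 3), ('high', 4), ('dis', 5)]

def lookup_priority(priority):
    '''Map a priority name fragment to its Zabbix severity code.'''
    for fragment, code in SEVERITIES:
        if fragment in priority:
            return code
    return 0

print(lookup_priority('average'))     # -> 3
print(lookup_priority('disaster'))    # -> 5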
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + + +def get_priority(priority): + ''' determine priority + ''' + prior = 0 + if 'info' in priority: + prior = 1 + elif 'warn' in priority: + prior = 2 + elif 'avg' == priority or 'ave' in priority: + prior = 3 + elif 'high' in priority: + prior = 4 + elif 'dis' in priority: + prior = 5 + + return prior + +def get_deps(zapi, deps): + ''' get trigger dependencies + ''' + results = [] + for desc in deps: + content = zapi.get_content('trigger', + 'get', + {'search': {'description': desc}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if content.has_key('result'): + results.append({'triggerid': content['result'][0]['triggerid']}) + + return results + +def main(): + ''' + Create a trigger in zabbix + + Example: + "params": { + "description": "Processor load is too high on {HOST.NAME}", + "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", + "dependencies": [ + { + "triggerid": "14062" + } + ] + }, + + ''' + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + expression=dict(default=None, type='str'), + description=dict(default=None, type='str'), + dependencies=dict(default=[], type='list'), + priority=dict(default='avg', type='str'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'trigger' + idname = "triggerid" + state = module.params['state'] + description = module.params['description'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'description': description}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'description': description, + 'expression': module.params['expression'], + 'dependencies': get_deps(zapi, module.params['dependencies']), + 'priority': get_priority(module.params['priority']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, 
results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py new file mode 100644 index 000000000..c45c9a75d --- /dev/null +++ b/roles/os_zabbix/library/zbx_user.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +''' +ansible module for zabbix users +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix user ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_usergroups(zapi, usergroups): + ''' Get usergroups + ''' + ugroups = [] + for ugr in usergroups: + content = zapi.get_content('usergroup', + 'get', + {'search': {'name': ugr}, + #'selectUsers': 'userid', + #'getRights': 'extend' + }) + if content['result']: + ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) + + return ugroups or None + +def get_usertype(user_type): + ''' + Determine zabbix user account type + ''' + if not user_type: + return None + + utype = 1 + if 'super' in user_type: + utype = 3 + elif 'admin' in user_type or user_type == 'admin': + utype = 2 + + return utype + +def main(): + ''' + ansible zabbix module for users + ''' + + ##def user(self, name, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + alias=dict(default=None, type='str'), + name=dict(default=None, type='str'), + surname=dict(default=None, type='str'), + user_type=dict(default=None, type='str'), + passwd=dict(default=None, type='str'), + usergroups=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + password = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = 
ZabbixAPI(ZabbixConnection(module.params['server'], user, password, module.params['debug'])) + + ## before we can create a user media and users with media types we need media + zbx_class_name = 'user' + idname = "userid" + alias = module.params['alias'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'output': 'extend', + 'search': {'alias': alias}, + "selectUsrgrps": 'usergrpid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'alias': alias, + 'passwd': module.params['passwd'], + 'usrgrps': get_usergroups(zapi, module.params['usergroups']), + 'name': module.params['name'], + 'surname': module.params['surname'], + 'type': get_usertype(module.params['user_type']), + } + + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'passwd': + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/library/zbx_usergroup.py b/roles/os_zabbix/library/zbx_usergroup.py new file mode 100644 index 000000000..ede4c9df1 --- /dev/null +++ b/roles/os_zabbix/library/zbx_usergroup.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python +''' +zabbix ansible module for usergroups +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix usergroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
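# Editorial sketch, not part of the patch: get_rights() below resolves each
# host group name to its groupid and attaches a permission code -- 'ro'
# becomes 2 (read-only), 'rw' becomes 3 (read-write), anything else 0 (deny).
# The groupid used in the demo call is a made-up value.
def permission_entry(groupid, perm):
    '''Build one entry of the usergroup "rights" list.'''
    codes = {'ro': 2, 'rw': 3}
    return {'id': groupid, 'permission': codes.get(perm, 0)}

print(permission_entry('7', 'rw'))    # -> {'id': '7', 'permission': 3}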
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rights(zapi, rights): + '''Get rights + ''' + perms = [] + for right in rights: + hstgrp = right.keys()[0] + perm = right.values()[0] + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) + if content['result']: + permission = 0 + if perm == 'ro': + permission = 2 + elif perm == 'rw': + permission = 3 + perms.append({'id': content['result'][0]['groupid'], + 'permission': permission}) + return perms + +def get_userids(zapi, users): + ''' Get userids from user aliases + ''' + userids = [] + for alias in users: + content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) + if content['result']: + userids.append(content['result'][0]['userid']) + + return userids + +def main(): + ''' Ansible module for usergroup + ''' + + ##def usergroup(self, name, rights=None, users=None, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + name=dict(default=None, type='str'), + rights=dict(default=[], type='list'), + users=dict(default=[], type='list'), + debug=dict(default=False, type='bool'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', os.environ['ZABBIX_USER']) + passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) + + zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) + + zbx_class_name = 'usergroup' + idname = "usrgrpid" + uname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': uname}, + 'selectUsers': 'userid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': uname, + 'rights': get_rights(zapi, module.params['rights']), + 'userids': get_userids(zapi, module.params['users']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'rights': + differences['rights'] = value + + elif key == 'userids' and zab_results.has_key('users'): + if zab_results['users'] != value: + differences['userids'] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = 
zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/os_zabbix/meta/main.yml b/roles/os_zabbix/meta/main.yml deleted file mode 100644 index 360f5aad2..000000000 --- a/roles/os_zabbix/meta/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: ZabbixAPI - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 1.2 -dependencies: -- lib_zabbix diff --git a/roles/os_zabbix/tasks/clean_zabbix.yml b/roles/os_zabbix/tasks/clean_zabbix.yml deleted file mode 100644 index 1bcc07d42..000000000 --- a/roles/os_zabbix/tasks/clean_zabbix.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: CLEAN List template for heartbeat - zbx_template: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - state: list - name: 'Template Heartbeat' - register: templ_heartbeat - -- name: CLEAN List template app zabbix server - zbx_template: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - state: list - name: 'Template App Zabbix Server' - register: templ_zabbix_server - -- name: CLEAN List template app zabbix server - zbx_template: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - state: list - name: 'Template App Zabbix Agent' - register: templ_zabbix_agent - -- name: CLEAN List all templates - zbx_template: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - state: list - register: templates - -- debug: var=templ_heartbeat.results - -- name: Remove templates if heartbeat template is missing - zbx_template: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - name: "{{ item }}" - state: absent - with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" - when: templ_heartbeat.results | length == 0 diff --git a/roles/os_zabbix/tasks/create_template.yml b/roles/os_zabbix/tasks/create_template.yml deleted file mode 100644 index 070390aba..000000000 --- a/roles/os_zabbix/tasks/create_template.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- debug: var=template - -- name: Template Create Template - zbx_template: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - name: "{{ template.name }}" - register: created_template - -- debug: var=created_template - -- name: Create Application - zbx_application: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - name: "{{ item }}" - template_name: "{{ template.name }}" - with_items: template.zapplications - register: created_application - when: template.zapplications is defined - -- debug: var=created_application - -- name: Create Items - zbx_item: - server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - key: "{{ item.key }}" - name: "{{ item.name | default(item.key, true) }}" - value_type: "{{ item.value_type | default('int') }}" - template_name: "{{ template.name }}" - applications: "{{ item.application }}" - with_items: template.zitems - register: created_items - when: template.zitems is defined - -#- debug: var=ctp_created_items - -- name: Create Triggers - zbx_trigger: 
- server: "{{ server }}" - user: "{{ user }}" - password: "{{ password }}" - description: "{{ item.description }}" - expression: "{{ item.expression }}" - priority: "{{ item.priority }}" - with_items: template.ztriggers - when: template.ztriggers is defined - -#- debug: var=ctp_created_triggers - - diff --git a/roles/os_zabbix/tasks/create_user.yml b/roles/os_zabbix/tasks/create_user.yml deleted file mode 100644 index 1f752a9e1..000000000 --- a/roles/os_zabbix/tasks/create_user.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Update zabbix credentialss for a user - zbx_user: - server: "{{ ozb_server }}" - user: "{{ ozb_user }}" - password: "{{ ozb_password }}" - alias: "{{ ozb_username }}" - passwd: "{{ ozb_new_password | default(ozb_password, true) }}" - register: user - -- debug: var=user.results diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml deleted file mode 100644 index 06c0a09fc..000000000 --- a/roles/os_zabbix/tasks/main.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: Main List all templates - zbx_template: - server: "{{ ozb_server }}" - user: "{{ ozb_user }}" - password: "{{ ozb_password }}" - state: list - register: templates - -- debug: var=templates - -- include_vars: template_heartbeat.yml -- include_vars: template_os_linux.yml - -- name: Include Template Heartbeat - include: create_template.yml - vars: - template: "{{ g_template_heartbeat }}" - server: "{{ ozb_server }}" - user: "{{ ozb_user }}" - password: "{{ ozb_password }}" - -#- name: Include Template os_linux -# include: create_template.yml -# vars: -# template: "{{ g_template_os_linux }}" -# server: "{{ ozb_server }}" -# user: "{{ ozb_user }}" -# password: "{{ ozb_password }}" - diff --git a/roles/os_zabbix/vars/main.yml b/roles/os_zabbix/vars/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/os_zabbix/vars/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/os_zabbix/vars/template_heartbeat.yml b/roles/os_zabbix/vars/template_heartbeat.yml deleted file mode 100644 index 158d6c79a..000000000 --- a/roles/os_zabbix/vars/template_heartbeat.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -g_template_heartbeat: - name: Template Heartbeat - zapplications: - - Heartbeat - zitems: - - name: Heartbeat Ping - application: Heartbeat - key: heartbeat.ping - ztriggers: - - description: 'Heartbeat.ping has failed on {HOST.NAME}' - expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' - priority: avg diff --git a/roles/os_zabbix/vars/template_host.yml b/roles/os_zabbix/vars/template_host.yml deleted file mode 100644 index e7cc667cb..000000000 --- a/roles/os_zabbix/vars/template_host.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_host: - params: - name: Template Host - host: Template Host - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Host - zitems: - - name: Host Ping - hostid: - key_: host.ping - type: 2 - value_type: 0 - output: extend - search: - key_: host.ping - ztriggers: - - description: 'Host ping has failed on {HOST.NAME}' - expression: '{Template Host:host.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Host ping has failed on*' - expandExpression: True diff --git a/roles/os_zabbix/vars/template_master.yml b/roles/os_zabbix/vars/template_master.yml deleted file mode 100644 index 5f9b41a4f..000000000 --- a/roles/os_zabbix/vars/template_master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_master: - params: - name: Template Master - host: Template Master - groups: - - groupid: 
1 # FIXME (not real) - output: extend - search: - name: Template Master - zitems: - - name: Master Etcd Ping - hostid: - key_: master.etcd.ping - type: 2 - value_type: 0 - output: extend - search: - key_: master.etcd.ping - ztriggers: - - description: 'Master Etcd ping has failed on {HOST.NAME}' - expression: '{Template Master:master.etcd.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Master Etcd ping has failed on*' - expandExpression: True diff --git a/roles/os_zabbix/vars/template_node.yml b/roles/os_zabbix/vars/template_node.yml deleted file mode 100644 index 98c343a24..000000000 --- a/roles/os_zabbix/vars/template_node.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_node: - params: - name: Template Node - host: Template Node - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Node - zitems: - - name: Kubelet Ping - hostid: - key_: kubelet.ping - type: 2 - value_type: 0 - output: extend - search: - key_: kubelet.ping - ztriggers: - - description: 'Kubelet ping has failed on {HOST.NAME}' - expression: '{Template Node:kubelet.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Kubelet ping has failed on*' - expandExpression: True diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml deleted file mode 100644 index 1cc928bce..000000000 --- a/roles/os_zabbix/vars/template_os_linux.yml +++ /dev/null @@ -1,143 +0,0 @@ ---- -g_template_os_linux: - name: Template OS Linux - zapplications: - - Disk - - Memory - - Kernel - zitems: - - key: kernel.uname.sysname - application: Kernel - value_type: string - - - key: kernel.all.cpu.wait.total - application: Kernel - value_type: int - - - key: kernel.all.cpu.irq.hard - application: Kernel - value_type: int - - - key: kernel.all.cpu.idle - application: Kernel - value_type: int - - - key: kernel.uname.distro - application: Kernel - value_type: string - - - key: kernel.uname.nodename - application: Kernel - value_type: string - - - key: kernel.all.cpu.irq.soft - application: Kernel - value_type: int - - - key: kernel.all.load.15_minute - application: Kernel - value_type: float - - - key: kernel.all.cpu.sys - application: Kernel - value_type: int - - - key: kernel.all.load.5_minute - application: Kernel - value_type: float - - - key: mem.freemem - application: Memory - value_type: int - - - key: kernel.all.cpu.nice - application: Kernel - value_type: int - - - key: mem.util.bufmem - application: Memory - value_type: int - - - key: swap.used - application: Memory - value_type: int - - - key: kernel.all.load.1_minute - application: Kernel - value_type: float - - - key: kernel.uname.version - application: Kernel - value_type: string - - - key: swap.length - application: Memory - value_type: int - - - key: mem.physmem - application: Memory - value_type: int - - - key: kernel.all.uptime - application: Kernel - value_type: int - - - key: swap.free - application: Memory - value_type: int - - - key: mem.util.used - application: Memory - value_type: int - - - key: kernel.all.cpu.user - application: Kernel - value_type: int - - - key: kernel.uname.machine - application: Kernel - value_type: string - - - key: hinv.ncpu - application: Kernel - value_type: int - - - key: mem.util.cached - application: Memory - value_type: int - - - key: kernel.all.cpu.steal - application: Kernel - value_type: int - - - key: kernel.all.pswitch - application: Kernel - value_type: int - - - key: kernel.uname.release - 
application: Kernel - value_type: string - - - key: proc.nprocs - application: Kernel - value_type: int - - - key: filesys.avail - application: Disk - value_type: int - - - key: filesys.capacity - application: Disk - value_type: int - - - key: filesys.free - application: Disk - value_type: int - - - key: filesys.full - application: Disk - value_type: float - - - key: filesys.used - application: Disk - value_type: float diff --git a/roles/os_zabbix/vars/template_router.yml b/roles/os_zabbix/vars/template_router.yml deleted file mode 100644 index 4dae7da1e..000000000 --- a/roles/os_zabbix/vars/template_router.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_router: - params: - name: Template Router - host: Template Router - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Router - zitems: - - name: Router Backends down - hostid: - key_: router.backends.down - type: 2 - value_type: 0 - output: extend - search: - key_: router.backends.down - ztriggers: - - description: 'Number of router backends down on {HOST.NAME}' - expression: '{Template Router:router.backends.down.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Number of router backends down on {HOST.NAME}' - expandExpression: True -- cgit v1.2.3 From 693be4802c2b3886b82681c5c1666b9f13d9ca36 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Fri, 21 Aug 2015 17:44:30 -0400 Subject: Updates for zbx ans module --- filter_plugins/oo_filters.py | 24 +- filter_plugins/oo_zabbix_filters.py | 29 +++ git/pylint.sh | 2 +- playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 57 ++--- playbooks/adhoc/zabbix_setup/create_template.yml | 57 ----- playbooks/adhoc/zabbix_setup/create_user.yml | 31 --- playbooks/adhoc/zabbix_setup/filter_plugins | 1 - playbooks/adhoc/zabbix_setup/roles | 1 - playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 38 ---- .../adhoc/zabbix_setup/vars/template_heartbeat.yml | 11 - .../adhoc/zabbix_setup/vars/template_host.yml | 27 --- .../adhoc/zabbix_setup/vars/template_master.yml | 27 --- .../adhoc/zabbix_setup/vars/template_node.yml | 27 --- .../adhoc/zabbix_setup/vars/template_os_linux.yml | 90 -------- .../adhoc/zabbix_setup/vars/template_router.yml | 27 --- roles/lib_zabbix/README.md | 38 ++++ roles/lib_zabbix/library/__init__.py | 3 + roles/lib_zabbix/library/zbx_application.py | 139 ++++++++++++ roles/lib_zabbix/library/zbx_discoveryrule.py | 177 +++++++++++++++ roles/lib_zabbix/library/zbx_host.py | 163 ++++++++++++++ roles/lib_zabbix/library/zbx_hostgroup.py | 116 ++++++++++ roles/lib_zabbix/library/zbx_item.py | 178 +++++++++++++++ roles/lib_zabbix/library/zbx_itemprototype.py | 241 ++++++++++++++++++++ roles/lib_zabbix/library/zbx_mediatype.py | 168 ++++++++++++++ roles/lib_zabbix/library/zbx_template.py | 132 +++++++++++ roles/lib_zabbix/library/zbx_trigger.py | 173 +++++++++++++++ roles/lib_zabbix/library/zbx_user.py | 190 ++++++++++++++++ roles/lib_zabbix/library/zbx_user_media.py | 245 +++++++++++++++++++++ roles/lib_zabbix/library/zbx_usergroup.py | 208 +++++++++++++++++ roles/lib_zabbix/tasks/create_template.yml | 61 +++++ roles/lib_zabbix/tasks/create_user.yml | 11 + roles/os_zabbix/README.md | 40 ++++ roles/os_zabbix/defaults/main.yml | 1 + roles/os_zabbix/handlers/main.yml | 1 + roles/os_zabbix/library/__init__.py | 0 roles/os_zabbix/library/get_drule.yml | 115 ---------- roles/os_zabbix/library/test.yml | 131 ----------- roles/os_zabbix/library/zbx_application.py | 135 ------------ roles/os_zabbix/library/zbx_discoveryrule.py | 177 
--------------- roles/os_zabbix/library/zbx_host.py | 163 -------------- roles/os_zabbix/library/zbx_hostgroup.py | 116 ---------- roles/os_zabbix/library/zbx_item.py | 170 -------------- roles/os_zabbix/library/zbx_itemprototype.py | 241 -------------------- roles/os_zabbix/library/zbx_mediatype.py | 149 ------------- roles/os_zabbix/library/zbx_template.py | 127 ----------- roles/os_zabbix/library/zbx_trigger.py | 175 --------------- roles/os_zabbix/library/zbx_user.py | 169 -------------- roles/os_zabbix/library/zbx_usergroup.py | 160 -------------- roles/os_zabbix/meta/main.yml | 9 + roles/os_zabbix/tasks/main.yml | 30 +++ roles/os_zabbix/vars/main.yml | 1 + roles/os_zabbix/vars/template_heartbeat.yml | 12 + roles/os_zabbix/vars/template_host.yml | 27 +++ roles/os_zabbix/vars/template_master.yml | 27 +++ roles/os_zabbix/vars/template_node.yml | 27 +++ roles/os_zabbix/vars/template_os_linux.yml | 173 +++++++++++++++ roles/os_zabbix/vars/template_router.yml | 27 +++ 57 files changed, 2702 insertions(+), 2393 deletions(-) delete mode 100644 playbooks/adhoc/zabbix_setup/create_template.yml delete mode 100644 playbooks/adhoc/zabbix_setup/create_user.yml delete mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins delete mode 120000 playbooks/adhoc/zabbix_setup/roles delete mode 100644 playbooks/adhoc/zabbix_setup/setup_zabbix.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_host.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_master.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_node.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_router.yml create mode 100644 roles/lib_zabbix/README.md create mode 100644 roles/lib_zabbix/library/__init__.py create mode 100644 roles/lib_zabbix/library/zbx_application.py create mode 100644 roles/lib_zabbix/library/zbx_discoveryrule.py create mode 100644 roles/lib_zabbix/library/zbx_host.py create mode 100644 roles/lib_zabbix/library/zbx_hostgroup.py create mode 100644 roles/lib_zabbix/library/zbx_item.py create mode 100644 roles/lib_zabbix/library/zbx_itemprototype.py create mode 100644 roles/lib_zabbix/library/zbx_mediatype.py create mode 100644 roles/lib_zabbix/library/zbx_template.py create mode 100644 roles/lib_zabbix/library/zbx_trigger.py create mode 100644 roles/lib_zabbix/library/zbx_user.py create mode 100644 roles/lib_zabbix/library/zbx_user_media.py create mode 100644 roles/lib_zabbix/library/zbx_usergroup.py create mode 100644 roles/lib_zabbix/tasks/create_template.yml create mode 100644 roles/lib_zabbix/tasks/create_user.yml create mode 100644 roles/os_zabbix/README.md create mode 100644 roles/os_zabbix/defaults/main.yml create mode 100644 roles/os_zabbix/handlers/main.yml delete mode 100644 roles/os_zabbix/library/__init__.py delete mode 100644 roles/os_zabbix/library/get_drule.yml delete mode 100644 roles/os_zabbix/library/test.yml delete mode 100644 roles/os_zabbix/library/zbx_application.py delete mode 100644 roles/os_zabbix/library/zbx_discoveryrule.py delete mode 100644 roles/os_zabbix/library/zbx_host.py delete mode 100644 roles/os_zabbix/library/zbx_hostgroup.py delete mode 100644 roles/os_zabbix/library/zbx_item.py delete mode 100644 roles/os_zabbix/library/zbx_itemprototype.py delete mode 100644 roles/os_zabbix/library/zbx_mediatype.py delete mode 100644 roles/os_zabbix/library/zbx_template.py delete mode 100644 
roles/os_zabbix/library/zbx_trigger.py delete mode 100644 roles/os_zabbix/library/zbx_user.py delete mode 100644 roles/os_zabbix/library/zbx_usergroup.py create mode 100644 roles/os_zabbix/meta/main.yml create mode 100644 roles/os_zabbix/tasks/main.yml create mode 100644 roles/os_zabbix/vars/main.yml create mode 100644 roles/os_zabbix/vars/template_heartbeat.yml create mode 100644 roles/os_zabbix/vars/template_host.yml create mode 100644 roles/os_zabbix/vars/template_master.yml create mode 100644 roles/os_zabbix/vars/template_node.yml create mode 100644 roles/os_zabbix/vars/template_os_linux.yml create mode 100644 roles/os_zabbix/vars/template_router.yml (limited to 'playbooks') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index c3408702d..a57b0f895 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -73,7 +73,7 @@ class FilterModule(object): if filters is not None: if not issubclass(type(filters), dict): - raise errors.AnsibleFilterError("|fialed expects filter to be a" + raise errors.AnsibleFilterError("|failed expects filter to be a" " dict") retval = [FilterModule.get_attr(d, attribute) for d in data if ( all([d.get(key, None) == filters[key] for key in filters]))] @@ -82,6 +82,25 @@ class FilterModule(object): return retval + @staticmethod + def oo_select_keys_from_list(data, keys): + ''' This returns a list, which contains the value portions for the keys + Ex: data = { 'a':1, 'b':2, 'c':3 } + keys = ['a', 'c'] + returns [1, 3] + ''' + + if not issubclass(type(data), list): + raise errors.AnsibleFilterError("|failed expects to filter on a list") + + if not issubclass(type(keys), list): + raise errors.AnsibleFilterError("|failed expects first param is a list") + + # Gather up the values for the list of keys passed in + retval = [FilterModule.oo_select_keys(item, keys) for item in data] + + return FilterModule.oo_flatten(retval) + @staticmethod def oo_select_keys(data, keys): ''' This returns a list, which contains the value portions for the keys @@ -97,7 +116,7 @@ class FilterModule(object): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in - retval = [data[key] for key in keys] + retval = [data[key] for key in keys if data.has_key(key)] return retval @@ -312,6 +331,7 @@ class FilterModule(object): ''' returns a mapping of filters to methods ''' return { "oo_select_keys": self.oo_select_keys, + "oo_select_keys_from_list": self.oo_select_keys_from_list, "oo_collect": self.oo_collect, "oo_flatten": self.oo_flatten, "oo_pdb": self.oo_pdb, diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py index a473993a2..c44b874e8 100644 --- a/filter_plugins/oo_zabbix_filters.py +++ b/filter_plugins/oo_zabbix_filters.py @@ -59,6 +59,17 @@ class FilterModule(object): return data[zabbix_item]['params'] return None + @staticmethod + def oo_build_zabbix_collect(data, string, value): + ''' Build a list of dicts from a list of data matched on string attribute + ''' + rval = [] + for item in data: + if item[string] == value: + rval.append(item) + + return rval + @staticmethod def oo_build_zabbix_list_dict(values, string): ''' Build a list of dicts with string as key for each value @@ -68,6 +79,22 @@ class FilterModule(object): rval.append({string: value}) return rval + @staticmethod + def oo_remove_attr_from_list_dict(data, attr): + ''' Remove a specific attribute from a dict + ''' + attrs = [] + if isinstance(attr, str): + attrs.append(attr) + 
else: + attrs = attr + + for attribute in attrs: + for _entry in data: + _entry.pop(attribute, None) + + return data + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -76,4 +103,6 @@ class FilterModule(object): "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid, "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, "create_data": self.create_data, + "oo_build_zabbix_collect": self.oo_build_zabbix_collect, + "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict, } diff --git a/git/pylint.sh b/git/pylint.sh index 8666931e9..55e8b6131 100755 --- a/git/pylint.sh +++ b/git/pylint.sh @@ -13,7 +13,7 @@ OLDREV=$1 NEWREV=$2 #TRG_BRANCH=$3 -PYTHON=/var/lib/jenkins/python27/bin/python +PYTHON=$(which python) set +e PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$") diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index a31cbef65..1e884240a 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -2,50 +2,57 @@ - hosts: localhost gather_facts: no vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix + g_server: http://localhost:8080/zabbix/api_jsonrpc.php + g_user: '' + g_password: '' + roles: - - ../../../roles/os_zabbix - post_tasks: + - lib_zabbix - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + post_tasks: + - name: CLEAN List template for heartbeat + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list name: 'Template Heartbeat' register: templ_heartbeat - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: CLEAN List template app zabbix server + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list name: 'Template App Zabbix Server' register: templ_zabbix_server - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: CLEAN List template app zabbix server + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list name: 'Template App Zabbix Agent' register: templ_zabbix_agent - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: CLEAN List all templates + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list register: templates - debug: var=templ_heartbeat.results - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: Remove templates if heartbeat template is missing + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" + name: "{{ item }}" state: absent with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml deleted file mode 100644 index 50fff53b2..000000000 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- debug: var=ctp_template - -- name: Create Template - 
zbx_template: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - name: "{{ ctp_template.name }}" - register: ctp_created_template - -- debug: var=ctp_created_template - -#- name: Create Application -# zbxapi: -# server: "{{ ctp_zserver }}" -# user: "{{ ctp_zuser }}" -# password: "{{ ctp_zpassword }}" -# zbx_class: Application -# state: present -# params: -# name: "{{ ctp_template.application.name}}" -# hostid: "{{ ctp_created_template.results[0].templateid }}" -# search: -# name: "{{ ctp_template.application.name}}" -# register: ctp_created_application - -#- debug: var=ctp_created_application - -- name: Create Items - zbx_item: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - key: "{{ item.key }}" - name: "{{ item.name | default(item.key, true) }}" - value_type: "{{ item.value_type | default('int') }}" - template_name: "{{ ctp_template.name }}" - with_items: ctp_template.zitems - register: ctp_created_items - -#- debug: var=ctp_created_items - -- name: Create Triggers - zbx_trigger: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - description: "{{ item.description }}" - expression: "{{ item.expression }}" - priority: "{{ item.priority }}" - with_items: ctp_template.ztriggers - when: ctp_template.ztriggers is defined - -#- debug: var=ctp_created_triggers - - diff --git a/playbooks/adhoc/zabbix_setup/create_user.yml b/playbooks/adhoc/zabbix_setup/create_user.yml deleted file mode 100644 index dd74798b7..000000000 --- a/playbooks/adhoc/zabbix_setup/create_user.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# export PYTHONPATH='/usr/lib/python2.7/site-packages/:/home/kwoodson/git/openshift-tools' -# ansible-playbook -e 'cli_password=zabbix' -e 'cli_new_password=new-zabbix' create_user.yml -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: admin - g_zpassword: "{{ cli_password }}" - roles: - - ../../../roles/os_zabbix - post_tasks: - - zbx_user: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: users - - - debug: var=users - - - name: Update zabbix creds for admin - zbx_user: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - alias: Admin - passwd: "{{ cli_new_password | default(g_zpassword, true) }}" diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/adhoc/zabbix_setup/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/adhoc/zabbix_setup/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml deleted file mode 100644 index 1729194b5..000000000 --- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix - roles: - - ../../../roles/os_zabbix - post_tasks: - - 
zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: templates - - - debug: var=templates - - - name: Include Template - include: create_template.yml - vars: - ctp_template: "{{ g_template_heartbeat }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - - - name: Include Template - include: create_template.yml - vars: - ctp_template: "{{ g_template_os_linux }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml deleted file mode 100644 index 22cc75554..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -g_template_heartbeat: - name: Template Heartbeat - zitems: - - name: Heartbeat Ping - hostid: - key: heartbeat.ping - ztriggers: - - description: 'Heartbeat.ping has failed on {HOST.NAME}' - expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' - priority: avg diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml deleted file mode 100644 index e7cc667cb..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_host.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_host: - params: - name: Template Host - host: Template Host - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Host - zitems: - - name: Host Ping - hostid: - key_: host.ping - type: 2 - value_type: 0 - output: extend - search: - key_: host.ping - ztriggers: - - description: 'Host ping has failed on {HOST.NAME}' - expression: '{Template Host:host.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Host ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml deleted file mode 100644 index 5f9b41a4f..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_master: - params: - name: Template Master - host: Template Master - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Master - zitems: - - name: Master Etcd Ping - hostid: - key_: master.etcd.ping - type: 2 - value_type: 0 - output: extend - search: - key_: master.etcd.ping - ztriggers: - - description: 'Master Etcd ping has failed on {HOST.NAME}' - expression: '{Template Master:master.etcd.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Master Etcd ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml deleted file mode 100644 index 98c343a24..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_node.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_node: - params: - name: Template Node - host: Template Node - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Node - zitems: - - name: Kubelet Ping - hostid: - key_: kubelet.ping - type: 2 - value_type: 0 - output: extend - search: - key_: kubelet.ping - ztriggers: - - description: 'Kubelet ping has failed on {HOST.NAME}' - expression: '{Template Node:kubelet.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - 
description: 'Kubelet ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml deleted file mode 100644 index 9cc038ffa..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -g_template_os_linux: - name: Template OS Linux - zitems: - - key: kernel.uname.sysname - value_type: string - - - key: kernel.all.cpu.wait.total - value_type: int - - - key: kernel.all.cpu.irq.hard - value_type: int - - - key: kernel.all.cpu.idle - value_type: int - - - key: kernel.uname.distro - value_type: string - - - key: kernel.uname.nodename - value_type: string - - - key: kernel.all.cpu.irq.soft - value_type: int - - - key: kernel.all.load.15_minute - value_type: float - - - key: kernel.all.cpu.sys - value_type: int - - - key: kernel.all.load.5_minute - value_type: float - - - key: mem.freemem - value_type: int - - - key: kernel.all.cpu.nice - value_type: int - - - key: mem.util.bufmem - value_type: int - - - key: swap.used - value_type: int - - - key: kernel.all.load.1_minute - value_type: float - - - key: kernel.uname.version - value_type: string - - - key: swap.length - value_type: int - - - key: mem.physmem - value_type: int - - - key: kernel.all.uptime - value_type: int - - - key: swap.free - value_type: int - - - key: mem.util.used - value_type: int - - - key: kernel.all.cpu.user - value_type: int - - - key: kernel.uname.machine - value_type: string - - - key: hinv.ncpu - value_type: int - - - key: mem.util.cached - value_type: int - - - key: kernel.all.cpu.steal - value_type: int - - - key: kernel.all.pswitch - value_type: int - - - key: kernel.uname.release - value_type: string - - - key: proc.nprocs - value_type: int diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml deleted file mode 100644 index 4dae7da1e..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_router.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_router: - params: - name: Template Router - host: Template Router - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Router - zitems: - - name: Router Backends down - hostid: - key_: router.backends.down - type: 2 - value_type: 0 - output: extend - search: - key_: router.backends.down - ztriggers: - - description: 'Number of router backends down on {HOST.NAME}' - expression: '{Template Router:router.backends.down.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Number of router backends down on {HOST.NAME}' - expandExpression: True diff --git a/roles/lib_zabbix/README.md b/roles/lib_zabbix/README.md new file mode 100644 index 000000000..69debc698 --- /dev/null +++ b/roles/lib_zabbix/README.md @@ -0,0 +1,38 @@ +zabbix +========= + +Automate zabbix tasks. + +Requirements +------------ + +This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. + +Role Variables +-------------- + +None + +Dependencies +------------ + +This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. 
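All of the zbx_* modules in this role drive Zabbix through that zbxapi wrapper rather than hitting the JSON-RPC endpoint directly. A minimal standalone sketch, assuming the ZabbixAPI/ZabbixConnection interface the modules below import from openshift_tools.monitoring.zbxapi (the URL and credentials are placeholders):

    # Connect once, then funnel every call through get_content(class, method, params),
    # which is the pattern each zbx_* module in this role follows.
    from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection

    zapi = ZabbixAPI(ZabbixConnection('http://localhost/zabbix/api_jsonrpc.php',
                                      'Admin', 'zabbix', False))
    content = zapi.get_content('template', 'get',
                               {'search': {'host': 'Template Heartbeat'}})
    print(content.get('result', []))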
+ +Example Playbook +---------------- + + - zbx_host: + server: zab_server + user: zab_user + password: zab_password + name: 'myhost' + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift operations, Red Hat, Inc diff --git a/roles/lib_zabbix/library/__init__.py b/roles/lib_zabbix/library/__init__.py new file mode 100644 index 000000000..0c7e19e41 --- /dev/null +++ b/roles/lib_zabbix/library/__init__.py @@ -0,0 +1,3 @@ +''' +ZabbixAPI ansible module +''' diff --git a/roles/lib_zabbix/library/zbx_application.py b/roles/lib_zabbix/library/zbx_application.py new file mode 100644 index 000000000..ffa00e052 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_application.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python +''' +Ansible module for application +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix application ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append(content['result'][0]['templateid']) + return template_ids + +def main(): + ''' Ansible module for application + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + template_name=dict(default=None, type='list'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the application for the rest of the calls + zbx_class_name = 'application' + idname = 'applicationid' + aname = module.params['name'] + state = module.params['state'] + # get a applicationid, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': aname}, + 'selectHost': 'hostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not 
exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0], + 'name': aname, + } + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=content['result'], state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_discoveryrule.py b/roles/lib_zabbix/library/zbx_discoveryrule.py new file mode 100644 index 000000000..79ee91180 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_discoveryrule.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +''' +Zabbix discovery rule ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
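# A sketch of the control flow every zbx_* module in this patch duplicates
# (the names mirror the code in these files; nothing new is introduced here):
#
#   content = zapi.get_content(zbx_class_name, 'get', {'search': {...}})
#   if state == 'list':    report content['result'] unchanged
#   if state == 'absent':  delete by id when exists(content), else report no change
#   if state == 'present': build params, create the object when missing, otherwise
#                          compare params against the existing object and only call
#                          'update' when something differs, keeping runs idempotent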
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_template(zapi, template_name): + '''get a template by name + ''' + content = zapi.get_content('template', + 'get', + {'search': {'host': template_name}, + 'output': 'extend', + 'selectInterfaces': 'interfaceid', + }) + if not content['result']: + return None + return content['result'][0] + +def get_type(vtype): + ''' + Determine which type of discoverrule this is + ''' + _types = {'agent': 0, + 'SNMPv1': 1, + 'trapper': 2, + 'simple': 3, + 'SNMPv2': 4, + 'internal': 5, + 'SNMPv3': 6, + 'active': 7, + 'external': 10, + 'database monitor': 11, + 'ipmi': 12, + 'ssh': 13, + 'telnet': 14, + 'JMX': 16, + } + + for typ in _types.keys(): + if vtype in typ or vtype == typ: + _vtype = _types[typ] + break + else: + _vtype = 2 + + return _vtype + +def main(): + ''' + Ansible module for zabbix discovery rules + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + interfaceid=dict(default=None, type='int'), + ztype=dict(default='trapper', type='str'), + delay=dict(default=60, type='int'), + lifetime=dict(default=30, type='int'), + template_name=dict(default=[], type='list'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'discoveryrule' + idname = "itemid" + dname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. 
+ content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': dname}, + #'selectDServices': 'extend', + #'selectDChecks': 'extend', + #'selectDhosts': 'dhostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + template = get_template(zapi, module.params['template_name']) + params = {'name': dname, + 'key_': module.params['key'], + 'hostid': template['templateid'], + 'interfaceid': module.params['interfaceid'], + 'lifetime': module.params['lifetime'], + 'type': get_type(module.params['ztype']), + } + if params['type'] in [2, 5, 7, 11]: + params.pop('interfaceid') + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py new file mode 100644 index 000000000..7be03653e --- /dev/null +++ b/roles/lib_zabbix/library/zbx_host.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +''' +Zabbix host ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_group_ids(zapi, hostgroup_names): + ''' + get hostgroups + ''' + # Fetch groups by name + group_ids = [] + for hgr in hostgroup_names: + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) + if content.has_key('result'): + group_ids.append({'groupid': content['result'][0]['groupid']}) + + return group_ids + +def get_template_ids(zapi, template_names): + ''' + get related templates + ''' + template_ids = [] + # Fetch templates by name + for template_name in template_names: + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + if content.has_key('result'): + template_ids.append({'templateid': content['result'][0]['templateid']}) + return template_ids + +def main(): + ''' + Ansible module for zabbix host + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + hostgroup_names=dict(default=[], type='list'), + template_names=dict(default=[], type='list'), + state=dict(default='present', type='str'), + interfaces=dict(default=None, type='list'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'host' + idname = "hostid" + hname = module.params['name'] + state = module.params['state'] + + # selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': hname}, + 'selectGroups': 'groupid', + 'selectParentTemplates': 'templateid', + 'selectInterfaces': 'interfaceid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + ifs = module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent + 'main': 1, # default interface? 1 = true + 'useip': 1, # default interface? 1 = true + 'ip': '127.0.0.1', # default interface? 1 = true + 'dns': '', # dns for host + 'port': '10050', # port for interface? 
10050 + }] + params = {'host': hname, + 'groups': get_group_ids(zapi, module.params['hostgroup_names']), + 'templates': get_template_ids(zapi, module.params['template_names']), + 'interfaces': ifs, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_hostgroup.py b/roles/lib_zabbix/library/zbx_hostgroup.py new file mode 100644 index 000000000..f161d3d9f --- /dev/null +++ b/roles/lib_zabbix/library/zbx_hostgroup.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +''' Ansible module for hostgroup +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix hostgroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' ansible module for hostgroup + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'hostgroup' + idname = "groupid" + hname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': hname}, + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': hname} + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py new file mode 100644 index 000000000..60da243fe --- /dev/null +++ b/roles/lib_zabbix/library/zbx_item.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python +''' + Ansible module for zabbix items +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix item ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def get_app_ids(zapi, application_names): + ''' get application ids from names + ''' + if isinstance(application_names, str): + application_names = [application_names] + app_ids = [] + for app_name in application_names: + content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) + if content.has_key('result'): + app_ids.append(content['result'][0]['applicationid']) + return app_ids + +def main(): + ''' + ansible zabbix module for zbx_item + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + template_name=dict(default=None, type='str'), + zabbix_type=dict(default=2, type='int'), + value_type=dict(default='int', type='str'), + applications=dict(default=[], type='list'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'item' + idname = "itemid" + state = module.params['state'] + key = module.params['key'] + template_name = module.params['template_name'] + + content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) + templateid = None + if content['result']: + templateid = content['result'][0]['templateid'] + else: + module.exit_json(changed=False, + results='Error: Could find template with name %s for item.' 
% template_name, + state="Unkown") + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'key_': key}, + 'selectApplications': 'applicationid', + }) + + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'name': module.params.get('name', module.params['key']), + 'key_': key, + 'hostid': templateid, + 'type': module.params['zabbix_type'], + 'value_type': get_value_type(module.params['value_type']), + 'applications': get_app_ids(zapi, module.params['applications']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'applications': + zab_apps = set([item['applicationid'] for item in zab_results[key]]) + if zab_apps != set(value): + differences[key] = zab_apps + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py new file mode 100644 index 000000000..47100f613 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_itemprototype.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python +''' +Zabbix discovery rule ansible module +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
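# Illustrative inputs for the item prototype module below (the helper names come
# from this file; the rule, key, and name strings are hypothetical examples only):
#
#   ruleid = get_rule_id(zapi, 'disc.disk')                 # hypothetical rule name
#   params = {'name': 'Disk usage {#OSO_DISK}',             # hypothetical prototype
#             'key_': 'disc.disk.usage[{#OSO_DISK}]',
#             'hostid': template['templateid'],
#             'ruleid': ruleid,
#             'type': get_type('trapper'),                  # maps to 2, so interfaceid is dropped
#             'value_type': get_value_type('int')}          # maps to 3 (numeric unsigned)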
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rule_id(zapi, discoveryrule_name): + '''get a discoveryrule by name + ''' + content = zapi.get_content('discoveryrule', + 'get', + {'search': {'name': discoveryrule_name}, + 'output': 'extend', + }) + if not content['result']: + return None + return content['result'][0]['itemid'] + +def get_template(zapi, template_name): + '''get a template by name + ''' + content = zapi.get_content('template', + 'get', + {'search': {'host': template_name}, + 'output': 'extend', + 'selectInterfaces': 'interfaceid', + }) + if not content['result']: + return None + return content['result'][0] + +def get_type(ztype): + ''' + Determine which type of discoverrule this is + ''' + _types = {'agent': 0, + 'SNMPv1': 1, + 'trapper': 2, + 'simple': 3, + 'SNMPv2': 4, + 'internal': 5, + 'SNMPv3': 6, + 'active': 7, + 'aggregate': 8, + 'external': 10, + 'database monitor': 11, + 'ipmi': 12, + 'ssh': 13, + 'telnet': 14, + 'calculated': 15, + 'JMX': 16, + } + + for typ in _types.keys(): + if ztype in typ or ztype == typ: + _vtype = _types[typ] + break + else: + _vtype = 2 + + return _vtype + +def get_value_type(value_type): + ''' + Possible values: + 0 - numeric float; + 1 - character; + 2 - log; + 3 - numeric unsigned; + 4 - text + ''' + vtype = 0 + if 'int' in value_type: + vtype = 3 + elif 'char' in value_type: + vtype = 1 + elif 'str' in value_type: + vtype = 4 + + return vtype + +def get_status(status): + ''' Determine status + ''' + _status = 0 + if status == 'disabled': + _status = 1 + elif status == 'unsupported': + _status = 3 + + return _status + +def get_app_ids(zapi, application_names): + ''' get application ids from names + ''' + app_ids = [] + for app_name in application_names: + content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) + if content.has_key('result'): + app_ids.append(content['result'][0]['applicationid']) + return app_ids + +def main(): + ''' + Ansible module for zabbix discovery rules + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + key=dict(default=None, type='str'), + interfaceid=dict(default=None, type='int'), + ztype=dict(default='trapper', type='str'), + value_type=dict(default='float', type='str'), + delay=dict(default=60, type='int'), + lifetime=dict(default=30, type='int'), + template_name=dict(default=[], type='list'), + state=dict(default='present', type='str'), + status=dict(default='enabled', type='str'), + discoveryrule_name=dict(default=None, type='str'), + applications=dict(default=[], type='list'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'itemprototype' + idname = "itemid" + dname = module.params['name'] + state = module.params['state'] + + # 
selectInterfaces doesn't appear to be working but is needed. + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': dname}, + 'selectApplications': 'applicationid', + 'selectDiscoveryRule': 'itemid', + #'selectDhosts': 'dhostid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + template = get_template(zapi, module.params['template_name']) + params = {'name': dname, + 'key_': module.params['key'], + 'hostid': template['templateid'], + 'interfaceid': module.params['interfaceid'], + 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']), + 'type': get_type(module.params['ztype']), + 'value_type': get_value_type(module.params['value_type']), + 'applications': get_app_ids(zapi, module.params['applications']), + } + if params['type'] in [2, 5, 7, 8, 11, 15]: + params.pop('interfaceid') + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'ruleid': + if value != zab_results['discoveryRule']['itemid']: + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_mediatype.py b/roles/lib_zabbix/library/zbx_mediatype.py new file mode 100644 index 000000000..cc72cc9a8 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_mediatype.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python +''' + Ansible module for mediatype +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix mediatype ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_mtype(mtype): + ''' + Transport used by the media type. + Possible values: + 0 - email; + 1 - script; + 2 - SMS; + 3 - Jabber; + 100 - Ez Texting. + ''' + mtype = mtype.lower() + media_type = None + if mtype == 'script': + media_type = 1 + elif mtype == 'sms': + media_type = 2 + elif mtype == 'jabber': + media_type = 3 + elif mtype == 'script': + media_type = 100 + else: + media_type = 0 + + return media_type + +def main(): + ''' + Ansible zabbix module for mediatype + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + description=dict(default=None, type='str'), + mtype=dict(default=None, type='str'), + smtp_server=dict(default=None, type='str'), + smtp_helo=dict(default=None, type='str'), + smtp_email=dict(default=None, type='str'), + passwd=dict(default=None, type='str'), + path=dict(default=None, type='str'), + username=dict(default=None, type='str'), + status=dict(default='enabled', type='str'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'mediatype' + idname = "mediatypeid" + description = module.params['description'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}}) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + status = 1 + if module.params['status']: + status = 0 + params = {'description': description, + 'type': get_mtype(module.params['mtype']), + 'smtp_server': module.params['smtp_server'], + 'smtp_helo': module.params['smtp_helo'], + 'smtp_email': module.params['smtp_email'], + 'passwd': module.params['passwd'], + 'exec_path': module.params['path'], + 'username': module.params['username'], + 'status': status, + } + + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if zab_results[key] != value and \ + 
zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_template.py b/roles/lib_zabbix/library/zbx_template.py new file mode 100644 index 000000000..1592fa268 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_template.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +''' +Ansible module for template +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix template ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def main(): + ''' Ansible module for template + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'template' + idname = 'templateid' + tname = module.params['name'] + state = module.params['state'] + # get a template, see if it exists + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'host': tname}, + 'selectParentTemplates': 'templateid', + 'selectGroups': 'groupid', + 'selectApplications': 'applicationid', + 'selectDiscoveries': 'extend', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + if not tname: + module.exit_json(failed=True, + changed=False, + results='Must specifiy a template name.', + state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'groups': module.params.get('groups', [{'groupid': '1'}]), + 'host': tname, + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'templates' and zab_results.has_key('parentTemplates'): + if zab_results['parentTemplates'] != value: + differences[key] = value + elif zab_results[key] != str(value) and zab_results[key] != value: + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=content['result'], state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py new file mode 100644 index 000000000..ffad5b40a --- /dev/null +++ b/roles/lib_zabbix/library/zbx_trigger.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +''' +ansible module for zabbix triggers +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix trigger ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_priority(priority): + ''' determine priority + ''' + prior = 0 + if 'info' in priority: + prior = 1 + elif 'warn' in priority: + prior = 2 + elif 'avg' == priority or 'ave' in priority: + prior = 3 + elif 'high' in priority: + prior = 4 + elif 'dis' in priority: + prior = 5 + + return prior + +def get_deps(zapi, deps): + ''' get trigger dependencies + ''' + results = [] + for desc in deps: + content = zapi.get_content('trigger', + 'get', + {'search': {'description': desc}, + 'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if content.has_key('result'): + results.append({'triggerid': content['result'][0]['triggerid']}) + + return results + +def main(): + ''' + Create a trigger in zabbix + + Example: + "params": { + "description": "Processor load is too high on {HOST.NAME}", + "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", + "dependencies": [ + { + "triggerid": "14062" + } + ] + }, + + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + expression=dict(default=None, type='str'), + description=dict(default=None, type='str'), + dependencies=dict(default=[], type='list'), + priority=dict(default='avg', type='str'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'trigger' + idname = "triggerid" + state = module.params['state'] + description = module.params['description'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'description': description}, + 
'expandExpression': True, + 'selectDependencies': 'triggerid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + params = {'description': description, + 'expression': module.params['expression'], + 'dependencies': get_deps(zapi, module.params['dependencies']), + 'priority': get_priority(module.params['priority']), + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_user.py b/roles/lib_zabbix/library/zbx_user.py new file mode 100644 index 000000000..a9906d773 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_user.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +''' +ansible module for zabbix users +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix user ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_usergroups(zapi, usergroups): + ''' Get usergroups + ''' + ugroups = [] + for ugr in usergroups: + content = zapi.get_content('usergroup', + 'get', + {'search': {'name': ugr}, + #'selectUsers': 'userid', + #'getRights': 'extend' + }) + if content['result']: + ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) + + return ugroups or None + +def get_passwd(passwd): + '''Determine if password is set, if not, return 'zabbix' + ''' + if passwd: + return passwd + + return 'zabbix' + +def get_usertype(user_type): + ''' + Determine zabbix user account type + ''' + if not user_type: + return None + + utype = 1 + if 'super' in user_type: + utype = 3 + elif 'admin' in user_type or user_type == 'admin': + utype = 2 + + return utype + +def main(): + ''' + ansible zabbix module for users + ''' + + ##def user(self, name, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + login=dict(default=None, type='str'), + first_name=dict(default=None, type='str'), + last_name=dict(default=None, type='str'), + user_type=dict(default=None, type='str'), + password=dict(default=None, type='str'), + update_password=dict(default=False, type='bool'), + user_groups=dict(default=[], type='list'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + ## before we can create a user media and users with media types we need media + zbx_class_name = 'user' + idname = "userid" + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'output': 'extend', + 'search': {'alias': module.params['login']}, + "selectUsrgrps": 'usergrpid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content) or len(content['result']) == 0: + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + + params = {'alias': module.params['login'], + 'passwd': get_passwd(module.params['password']), + 'usrgrps': get_usergroups(zapi, module.params['user_groups']), + 'name': module.params['first_name'], + 'surname': module.params['last_name'], + 'type': get_usertype(module.params['user_type']), + } + + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + + if content.has_key('Error'): + module.exit_json(failed=True, changed=False, results=content, state='present') + + module.exit_json(changed=True, 
results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + + # Update password + if not module.params['update_password']: + params.pop('passwd', None) + + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'usrgrps': + # this must be done as a list of ordered dictionaries fails comparison + if not all([True for _ in zab_results[key][0] if _ in value[0]]): + differences[key] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_user_media.py b/roles/lib_zabbix/library/zbx_user_media.py new file mode 100644 index 000000000..aad3554dd --- /dev/null +++ b/roles/lib_zabbix/library/zbx_user_media.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python +''' + Ansible module for user media +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix user media ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. 
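+#
+# Usage sketch (placeholder values; option names follow the argument_spec below):
+#
+#   - name: Attach an email media entry to a user
+#     zbx_user_media:
+#       zbx_server: https://localhost/zabbix/api_jsonrpc.php
+#       zbx_user: Admin
+#       zbx_password: zabbix
+#       login: kwoodson
+#       mediatype_desc: Email
+#       sendto: kwoodson@example.com
+#       severity: 63
+#       period: 1-7,00:00-24:00
+#       state: present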
+# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_mtype(zapi, mtype): + '''Get mediatype + + If passed an int, return it as the mediatypeid + if its a string, then try to fetch through a description + ''' + if isinstance(mtype, int): + return mtype + try: + return int(mtype) + except ValueError: + pass + + content = zapi.get_content('mediatype', 'get', {'search': {'description': mtype}}) + if content.has_key['result'] and content['result']: + return content['result'][0]['mediatypeid'] + + return None + +def get_user(zapi, user): + ''' Get userids from user aliases + ''' + content = zapi.get_content('user', 'get', {'search': {'alias': user}}) + if content['result']: + return content['result'][0] + + return None + +def get_severity(severity): + ''' determine severity + ''' + if isinstance(severity, int) or \ + isinstance(severity, str): + return severity + + val = 0 + sev_map = { + 'not': 2**0, + 'inf': 2**1, + 'war': 2**2, + 'ave': 2**3, + 'avg': 2**3, + 'hig': 2**4, + 'dis': 2**5, + } + for level in severity: + val |= sev_map[level[:3].lower()] + return val + +def get_zbx_user_query_data(zapi, user_name): + ''' If name exists, retrieve it, and build query params. + ''' + query = {} + if user_name: + zbx_user = get_user(zapi, user_name) + query = {'userid': zbx_user['userid']} + + return query + +def find_media(medias, user_media): + ''' Find the user media in the list of medias + ''' + for media in medias: + if all([media[key] == user_media[key] for key in user_media.keys()]): + return media + return None + +def get_active(in_active): + '''Determine active value + ''' + active = 1 + if in_active: + active = 0 + + return active + +def get_mediatype(zapi, mediatype, mediatype_desc): + ''' Determine mediatypeid + ''' + mtypeid = None + if mediatype: + mtypeid = get_mtype(zapi, mediatype) + elif mediatype_desc: + mtypeid = get_mtype(zapi, mediatype_desc) + + return mtypeid + +def main(): + ''' + Ansible zabbix module for mediatype + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ['ZABBIX_USER'], type='str'), + zbx_password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), + zbx_debug=dict(default=False, type='bool'), + login=dict(default=None, type='str'), + active=dict(default=False, type='bool'), + medias=dict(default=None, type='list'), + mediaid=dict(default=None, type='int'), + mediatype=dict(default=None, type='str'), + mediatype_desc=dict(default=None, type='str'), + #d-d,hh:mm-hh:mm;d-d,hh:mm-hh:mm... 
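+            # e.g. 1-7,00:00-24:00 for every day of the week, all day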
+ period=dict(default=None, type='str'), + sendto=dict(default=None, type='str'), + severity=dict(default=None, type='str'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'user' + idname = "mediaid" + state = module.params['state'] + + # User media is fetched through the usermedia.get + zbx_user_query = get_zbx_user_query_data(zapi, module.params['login']) + content = zapi.get_content('usermedia', 'get', zbx_user_query) + + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content) or len(content['result']) == 0: + module.exit_json(changed=False, state="absent") + + if not module.params['login']: + module.exit_json(failed=True, changed=False, results='Must specifiy a user login.', state="absent") + + content = zapi.get_content(zbx_class_name, 'deletemedia', [content['result'][0][idname]]) + + if content.has_key('error'): + module.exit_json(changed=False, results=content['error'], state="absent") + + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + active = get_active(module.params['active']) + mtypeid = get_mediatype(zapi, module.params['mediatype'], module.params['mediatype_desc']) + + medias = module.params['medias'] + if medias == None: + medias = [{'mediatypeid': mtypeid, + 'sendto': module.params['sendto'], + 'active': active, + 'severity': int(get_severity(module.params['severity'])), + 'period': module.params['period'], + }] + + params = {'users': [zbx_user_query], + 'medias': medias, + 'output': 'extend', + } + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'addmedia', params) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state='present') + + # mediaid signifies an update + # If user params exists, check to see if they already exist in zabbix + # if they exist, then return as no update + # elif they do not exist, then take user params only + diff = {'medias': [], 'users': {}} + _ = [diff['medias'].append(media) for media in params['medias'] if not find_media(content['result'], media)] + + if not diff['medias']: + module.exit_json(changed=False, results=content['result'], state="present") + + for user in params['users']: + diff['users']['userid'] = user['userid'] + + # We have differences and need to update + content = zapi.get_content(zbx_class_name, 'updatemedia', diff) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_usergroup.py b/roles/lib_zabbix/library/zbx_usergroup.py new file mode 100644 index 000000000..297d8ef91 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_usergroup.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python +''' +zabbix ansible module for usergroups +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix usergroup ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_rights(zapi, rights): + '''Get rights + ''' + if rights == None: + return None + + perms = [] + for right in rights: + hstgrp = right.keys()[0] + perm = right.values()[0] + content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) + if content['result']: + permission = 0 + if perm == 'ro': + permission = 2 + elif perm == 'rw': + permission = 3 + perms.append({'id': content['result'][0]['groupid'], + 'permission': permission}) + return perms + +def get_gui_access(access): + ''' Return the gui_access for a usergroup + ''' + access = access.lower() + if access == 'internal': + return 1 + elif access == 'disabled': + return 2 + + return 0 + +def get_debug_mode(mode): + ''' Return the debug_mode for a usergroup + ''' + mode = mode.lower() + if mode == 'enabled': + return 1 + + return 0 + +def get_user_status(status): + ''' Return the user_status for a usergroup + ''' + status = status.lower() + if status == 'enabled': + return 0 + + return 1 + + +#def get_userids(zapi, users): +# ''' Get userids from user aliases +# ''' +# if not users: +# return None +# +# userids = [] +# for alias in users: +# content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) +# if content['result']: +# userids.append(content['result'][0]['userid']) +# +# return userids + +def main(): + ''' Ansible module for usergroup + ''' + + ##def usergroup(self, name, rights=None, users=None, state='present', params=None): + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), + zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), + zbx_debug=dict(default=False, type='bool'), + debug_mode=dict(default='disabled', type='str'), + gui_access=dict(default='default', type='str'), + status=dict(default='enabled', type='str'), + name=dict(default=None, type='str', 
required=True), + rights=dict(default=None, type='list'), + #users=dict(default=None, type='list'), + state=dict(default='present', type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + zbx_class_name = 'usergroup' + idname = "usrgrpid" + uname = module.params['name'] + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'search': {'name': uname}, + 'selectUsers': 'userid', + }) + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + if not uname: + module.exit_json(failed=True, changed=False, results='Need to pass in a user.', state="error") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) + module.exit_json(changed=True, results=content['result'], state="absent") + + if state == 'present': + + params = {'name': uname, + 'rights': get_rights(zapi, module.params['rights']), + 'users_status': get_user_status(module.params['status']), + 'gui_access': get_gui_access(module.params['gui_access']), + 'debug_mode': get_debug_mode(module.params['debug_mode']), + #'userids': get_userids(zapi, module.params['users']), + } + + _ = [params.pop(key, None) for key in params.keys() if params[key] == None] + + if not exists(content): + # if we didn't find it, create it + content = zapi.get_content(zbx_class_name, 'create', params) + module.exit_json(changed=True, results=content['result'], state='present') + # already exists, we need to update it + # let's compare properties + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + if key == 'rights': + differences['rights'] = value + + #elif key == 'userids' and zab_results.has_key('users'): + #if zab_results['users'] != value: + #differences['userids'] = value + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + # We have differences and need to update + differences[idname] = zab_results[idname] + content = zapi.get_content(zbx_class_name, 'update', differences) + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml new file mode 100644 index 000000000..022ca52f2 --- /dev/null +++ b/roles/lib_zabbix/tasks/create_template.yml @@ -0,0 +1,61 @@ +--- +- debug: var=template + +- name: Template Create Template + zbx_template: + zbx_server: "{{ server }}" + zbx_user: "{{ user }}" + zbx_password: "{{ password }}" + name: "{{ template.name }}" + register: created_template + +- debug: var=created_template + +- set_fact: + lzbx_applications: "{{ template.zitems | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}" + +- debug: var=lzbx_applications + +- name: Create Application + zbx_application: + zbx_server: "{{ server }}" + zbx_user: "{{ user }}" + zbx_password: "{{ password }}" + name: "{{ item }}" + template_name: "{{ template.name }}" + with_items: lzbx_applications + register: created_application + when: template.zitems is defined + +- debug: var=created_application + +- name: Create Items + zbx_item: + zbx_server: "{{ server }}" + zbx_user: "{{ user }}" + zbx_password: "{{ password }}" + key: "{{ item.key }}" + name: "{{ item.name | default(item.key, true) }}" + value_type: "{{ item.value_type | default('int') }}" + template_name: "{{ template.name }}" + applications: "{{ item.applications }}" + with_items: template.zitems + register: created_items + when: template.zitems is defined + +#- debug: var=ctp_created_items + +- name: Create Triggers + zbx_trigger: + zbx_server: "{{ server }}" + zbx_user: "{{ user }}" + zbx_password: "{{ password }}" + description: "{{ item.description }}" + expression: "{{ item.expression }}" + priority: "{{ item.priority }}" + with_items: template.ztriggers + when: template.ztriggers is defined + +#- debug: var=ctp_created_triggers + + diff --git a/roles/lib_zabbix/tasks/create_user.yml b/roles/lib_zabbix/tasks/create_user.yml new file mode 100644 index 000000000..1f752a9e1 --- /dev/null +++ b/roles/lib_zabbix/tasks/create_user.yml @@ -0,0 +1,11 @@ +--- +- name: Update zabbix credentialss for a user + zbx_user: + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + alias: "{{ ozb_username }}" + passwd: "{{ ozb_new_password | default(ozb_password, true) }}" + register: user + +- debug: var=user.results diff --git a/roles/os_zabbix/README.md b/roles/os_zabbix/README.md new file mode 100644 index 000000000..ac3dc2833 --- /dev/null +++ b/roles/os_zabbix/README.md @@ -0,0 +1,40 @@ +os_zabbix +========= + +Automate zabbix tasks. + +Requirements +------------ + +This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. + +Role Variables +-------------- + +zab_server +zab_username +zab_password + +Dependencies +------------ + +This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now. 
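+
+The lib_zabbix modules added alongside this role (zbx_user, zbx_usergroup,
+zbx_trigger, zbx_user_media, ...) take their connection settings from the
+zbx_server, zbx_user and zbx_password options and otherwise fall back to the
+ZABBIX_USER and ZABBIX_PASSWORD environment variables. A sketch with
+placeholder values (names taken from the test playbooks):
+
+    - zbx_usergroup:
+        zbx_server: https://localhost/zabbix/api_jsonrpc.php
+        zbx_user: Admin
+        zbx_password: zabbix
+        name: 'kenny usergroup'
+        rights:
+        - 'kenny hostgroup': rw
+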
+ +Example Playbook +---------------- + + - zbx_host: + server: zab_server + user: zab_user + password: zab_password + name: 'myhost' + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift operations, Red Hat, Inc diff --git a/roles/os_zabbix/defaults/main.yml b/roles/os_zabbix/defaults/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/os_zabbix/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/os_zabbix/handlers/main.yml b/roles/os_zabbix/handlers/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/os_zabbix/handlers/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/os_zabbix/library/__init__.py b/roles/os_zabbix/library/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/os_zabbix/library/get_drule.yml b/roles/os_zabbix/library/get_drule.yml deleted file mode 100644 index a3e39f535..000000000 --- a/roles/os_zabbix/library/get_drule.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -# This is a test playbook to create one of each of the zabbix ansible modules. -# ensure that the zbxapi module is installed -# ansible-playbook test.yml -- name: Test zabbix ansible module - hosts: localhost - gather_facts: no - vars: -#zbx_server: https://localhost/zabbix/api_jsonrpc.php -#zbx_user: Admin -#zbx_password: zabbix - - pre_tasks: - - name: Template Discovery rules - zbx_template: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Template App HaProxy' - state: list - register: template_output - - - debug: var=template_output - - - name: Discovery rules - zbx_discovery_rule: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'haproxy.discovery sender' - state: list - register: drule - - - debug: var=drule - -# - name: Create an application -# zbx_application: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'Test App' -# template_name: "test template" -# register: item_output -# -# - name: Create an item -# zbx_item: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'test item' -# key: 'kenny.item.1' -# applications: -# - 'Test App' -# template_name: "test template" -# register: item_output -# -# - debug: var=item_output -# -# - name: Create an trigger -# zbx_trigger: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# expression: '{test template:kenny.item.1.last()}>2' -# description: 'Kenny desc' -# register: trigger_output -# -# - debug: var=trigger_output -# -# - name: Create a hostgroup -# zbx_hostgroup: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'kenny hostgroup' -# register: hostgroup_output -# -# - debug: var=hostgroup_output -# -# - name: Create a host -# zbx_host: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: 'kenny host' -# template_names: -# - test template -# hostgroup_names: -# - kenny hostgroup -# register: host_output -# -# - debug: var=host_output -# -# - name: Create a usergroup -# zbx_usergroup: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ zbx_password }}" -# name: kenny usergroup -# rights: -# - 'kenny hostgroup': rw -# register: usergroup_output -# -# - debug: var=usergroup_output -# -# - name: Create a user -# zbx_user: -# server: "{{ zbx_server }}" -# user: "{{ zbx_user }}" -# password: "{{ 
zbx_password }}" -# alias: kwoodson -# state: list -# register: user_output -# -# - debug: var=user_output diff --git a/roles/os_zabbix/library/test.yml b/roles/os_zabbix/library/test.yml deleted file mode 100644 index cedace1a0..000000000 --- a/roles/os_zabbix/library/test.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -# This is a test playbook to create one of each of the zabbix ansible modules. -# ensure that the zbxapi module is installed -# ansible-playbook test.yml -- name: Test zabbix ansible module - hosts: localhost - gather_facts: no - vars: - zbx_server: http://localhost:8080/zabbix/api_jsonrpc.php - zbx_user: Admin - zbx_password: zabbix - - pre_tasks: - - name: Create a template - zbx_template: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'test template' - register: template_output - - - debug: var=template_output - - - name: Create a discoveryrule - zbx_discoveryrule: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: test discoverule - key: test_listener - template_name: test template - lifetime: 14 - register: discoveryrule - - - debug: var=discoveryrule - - - name: Create an itemprototype - zbx_itemprototype: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Test itemprototype on {#TEST_LISTENER}' - key: 'test[{#TEST_LISTENER}]' - template_name: test template - discoveryrule_name: test discoverule - register: itemproto - - - debug: var=itemproto - - - name: Create an application - zbx_application: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'Test App' - template_name: "test template" - register: item_output - - - name: Create an item - zbx_item: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'test item' - key: 'kenny.item.1' - applications: - - 'Test App' - template_name: "test template" - register: item_output - - - debug: var=item_output - - - name: Create an trigger - zbx_trigger: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - expression: '{test template:kenny.item.1.last()}>2' - description: 'Kenny desc' - register: trigger_output - - - debug: var=trigger_output - - - name: Create a hostgroup - zbx_hostgroup: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'kenny hostgroup' - register: hostgroup_output - - - debug: var=hostgroup_output - - - name: Create a host - zbx_host: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: 'kenny host' - template_names: - - test template - hostgroup_names: - - kenny hostgroup - register: host_output - - - debug: var=host_output - - - name: Create a usergroup - zbx_usergroup: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - name: kenny usergroup - rights: - - 'kenny hostgroup': rw - register: usergroup_output - - - debug: var=usergroup_output - - - name: Create a user - zbx_user: - server: "{{ zbx_server }}" - user: "{{ zbx_user }}" - password: "{{ zbx_password }}" - alias: kenny user - passwd: zabbix - usergroups: - - kenny usergroup - register: user_output - - - debug: var=user_output diff --git a/roles/os_zabbix/library/zbx_application.py b/roles/os_zabbix/library/zbx_application.py deleted file mode 100644 index 5d4acf72d..000000000 --- a/roles/os_zabbix/library/zbx_application.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env 
python -''' -Ansible module for application -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix application ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_template_ids(zapi, template_names): - ''' - get related templates - ''' - template_ids = [] - # Fetch templates by name - for template_name in template_names: - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - if content.has_key('result'): - template_ids.append(content['result'][0]['templateid']) - return template_ids - -def main(): - ''' Ansible module for application - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - template_name=dict(default=None, type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the application for the rest of the calls - zbx_class_name = 'application' - idname = 'applicationid' - aname = module.params['name'] - state = module.params['state'] - # get a applicationid, see if it exists - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': aname}, - 'selectHost': 'hostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0], - 'name': aname, - } - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if 
key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - elif zab_results[key] != str(value) and zab_results[key] != value: - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=content['result'], state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_discoveryrule.py b/roles/os_zabbix/library/zbx_discoveryrule.py deleted file mode 100644 index 56b87fecc..000000000 --- a/roles/os_zabbix/library/zbx_discoveryrule.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix discovery rule ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_template(zapi, template_name): - '''get a template by name - ''' - content = zapi.get_content('template', - 'get', - {'search': {'host': template_name}, - 'output': 'extend', - 'selectInterfaces': 'interfaceid', - }) - if not content['result']: - return None - return content['result'][0] - -def get_type(vtype): - ''' - Determine which type of discoverrule this is - ''' - _types = {'agent': 0, - 'SNMPv1': 1, - 'trapper': 2, - 'simple': 3, - 'SNMPv2': 4, - 'internal': 5, - 'SNMPv3': 6, - 'active': 7, - 'external': 10, - 'database monitor': 11, - 'ipmi': 12, - 'ssh': 13, - 'telnet': 14, - 'JMX': 16, - } - - for typ in _types.keys(): - if vtype in typ or vtype == typ: - _vtype = _types[typ] - break - else: - _vtype = 2 - - return _vtype - -def main(): - ''' - Ansible module for zabbix discovery rules - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), - delay=dict(default=60, type='int'), - lifetime=dict(default=30, type='int'), - template_name=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'discoveryrule' - idname = "itemid" - dname = module.params['name'] - state = module.params['state'] - - # selectInterfaces doesn't appear to be working but is needed. 
- content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': dname}, - #'selectDServices': 'extend', - #'selectDChecks': 'extend', - #'selectDhosts': 'dhostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - template = get_template(zapi, module.params['template_name']) - params = {'name': dname, - 'key_': module.params['key'], - 'hostid': template['templateid'], - 'interfaceid': module.params['interfaceid'], - 'lifetime': module.params['lifetime'], - 'type': get_type(module.params['ztype']), - } - if params['type'] in [2, 5, 7, 11]: - params.pop('interfaceid') - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_host.py b/roles/os_zabbix/library/zbx_host.py deleted file mode 100644 index 12c5f3456..000000000 --- a/roles/os_zabbix/library/zbx_host.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix host ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_group_ids(zapi, hostgroup_names): - ''' - get hostgroups - ''' - # Fetch groups by name - group_ids = [] - for hgr in hostgroup_names: - content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}}) - if content.has_key('result'): - group_ids.append({'groupid': content['result'][0]['groupid']}) - - return group_ids - -def get_template_ids(zapi, template_names): - ''' - get related templates - ''' - template_ids = [] - # Fetch templates by name - for template_name in template_names: - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - if content.has_key('result'): - template_ids.append({'templateid': content['result'][0]['templateid']}) - return template_ids - -def main(): - ''' - Ansible module for zabbix host - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - hostgroup_names=dict(default=[], type='list'), - template_names=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - interfaces=dict(default=None, type='list'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'host' - idname = "hostid" - hname = module.params['name'] - state = module.params['state'] - - # selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': hname}, - 'selectGroups': 'groupid', - 'selectParentTemplates': 'templateid', - 'selectInterfaces': 'interfaceid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - ifs = module.params['interfaces'] or [{'type': 1, # interface type, 1 = agent - 'main': 1, # default interface? 1 = true - 'useip': 1, # default interface? 1 = true - 'ip': '127.0.0.1', # default interface? 1 = true - 'dns': '', # dns for host - 'port': '10050', # port for interface? 
10050 - }] - params = {'host': hname, - 'groups': get_group_ids(zapi, module.params['hostgroup_names']), - 'templates': get_template_ids(zapi, module.params['template_names']), - 'interfaces': ifs, - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_hostgroup.py b/roles/os_zabbix/library/zbx_hostgroup.py deleted file mode 100644 index a1eb875d4..000000000 --- a/roles/os_zabbix/library/zbx_hostgroup.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python -''' Ansible module for hostgroup -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix hostgroup ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def main(): - ''' ansible module for hostgroup - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'hostgroup' - idname = "groupid" - hname = module.params['name'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': hname}, - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': hname} - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py deleted file mode 100644 index 45ba6c2b0..000000000 --- a/roles/os_zabbix/library/zbx_item.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for zabbix items -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix item ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_value_type(value_type): - ''' - Possible values: - 0 - numeric float; - 1 - character; - 2 - log; - 3 - numeric unsigned; - 4 - text - ''' - vtype = 0 - if 'int' in value_type: - vtype = 3 - elif 'char' in value_type: - vtype = 1 - elif 'str' in value_type: - vtype = 4 - - return vtype - -def get_app_ids(zapi, application_names): - ''' get application ids from names - ''' - app_ids = [] - for app_name in application_names: - content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) - if content.has_key('result'): - app_ids.append(content['result'][0]['applicationid']) - return app_ids - -def main(): - ''' - ansible zabbix module for zbx_item - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - template_name=dict(default=None, type='str'), - zabbix_type=dict(default=2, type='int'), - value_type=dict(default='int', type='str'), - applications=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'item' - idname = "itemid" - state = module.params['state'] - key = module.params['key'] - template_name = module.params['template_name'] - - content = zapi.get_content('template', 'get', {'search': {'host': template_name}}) - templateid = None - if content['result']: - templateid = content['result'][0]['templateid'] - else: - module.exit_json(changed=False, - results='Error: Could find template with name %s for item.' 
% template_name, - state="Unkown") - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'key_': key}, - 'selectApplications': 'applicationid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': module.params.get('name', module.params['key']), - 'key_': key, - 'hostid': templateid, - 'type': module.params['zabbix_type'], - 'value_type': get_value_type(module.params['value_type']), - 'applications': get_app_ids(zapi, module.params['applications']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_itemprototype.py b/roles/os_zabbix/library/zbx_itemprototype.py deleted file mode 100644 index f0eb6bbbd..000000000 --- a/roles/os_zabbix/library/zbx_itemprototype.py +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python -''' -Zabbix discovery rule ansible module -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_rule_id(zapi, discoveryrule_name): - '''get a discoveryrule by name - ''' - content = zapi.get_content('discoveryrule', - 'get', - {'search': {'name': discoveryrule_name}, - 'output': 'extend', - }) - if not content['result']: - return None - return content['result'][0]['itemid'] - -def get_template(zapi, template_name): - '''get a template by name - ''' - content = zapi.get_content('template', - 'get', - {'search': {'host': template_name}, - 'output': 'extend', - 'selectInterfaces': 'interfaceid', - }) - if not content['result']: - return None - return content['result'][0] - -def get_type(ztype): - ''' - Determine which type of discoverrule this is - ''' - _types = {'agent': 0, - 'SNMPv1': 1, - 'trapper': 2, - 'simple': 3, - 'SNMPv2': 4, - 'internal': 5, - 'SNMPv3': 6, - 'active': 7, - 'aggregate': 8, - 'external': 10, - 'database monitor': 11, - 'ipmi': 12, - 'ssh': 13, - 'telnet': 14, - 'calculated': 15, - 'JMX': 16, - } - - for typ in _types.keys(): - if ztype in typ or ztype == typ: - _vtype = _types[typ] - break - else: - _vtype = 2 - - return _vtype - -def get_value_type(value_type): - ''' - Possible values: - 0 - numeric float; - 1 - character; - 2 - log; - 3 - numeric unsigned; - 4 - text - ''' - vtype = 0 - if 'int' in value_type: - vtype = 3 - elif 'char' in value_type: - vtype = 1 - elif 'str' in value_type: - vtype = 4 - - return vtype - -def get_status(status): - ''' Determine status - ''' - _status = 0 - if status == 'disabled': - _status = 1 - elif status == 'unsupported': - _status = 3 - - return _status - -def get_app_ids(zapi, application_names): - ''' get application ids from names - ''' - app_ids = [] - for app_name in application_names: - content = zapi.get_content('application', 'get', {'search': {'name': app_name}}) - if content.has_key('result'): - app_ids.append(content['result'][0]['applicationid']) - return app_ids - -def main(): - ''' - Ansible module for zabbix discovery rules - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=os.environ['ZABBIX_USER'], type='str'), - password=dict(default=os.environ['ZABBIX_PASSWORD'], type='str'), - name=dict(default=None, type='str'), - key=dict(default=None, type='str'), - interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), - value_type=dict(default='float', type='str'), - delay=dict(default=60, type='int'), - lifetime=dict(default=30, type='int'), - template_name=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - status=dict(default='enabled', type='str'), - discoveryrule_name=dict(default=None, type='str'), - applications=dict(default=[], type='list'), - ), - #supports_check_mode=True - ) - - user = module.params['user'] - passwd = module.params['password'] - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'itemprototype' - idname = "itemid" - dname = module.params['name'] - state = module.params['state'] - - # 
selectInterfaces doesn't appear to be working but is needed. - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': dname}, - 'selectApplications': 'applicationid', - 'selectDiscoveryRule': 'itemid', - #'selectDhosts': 'dhostid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - template = get_template(zapi, module.params['template_name']) - params = {'name': dname, - 'key_': module.params['key'], - 'hostid': template['templateid'], - 'interfaceid': module.params['interfaceid'], - 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']), - 'type': get_type(module.params['ztype']), - 'value_type': get_value_type(module.params['value_type']), - 'applications': get_app_ids(zapi, module.params['applications']), - } - if params['type'] in [2, 5, 7, 8, 11, 15]: - params.pop('interfaceid') - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if key == 'ruleid': - if value != zab_results['discoveryRule']['itemid']: - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_mediatype.py b/roles/os_zabbix/library/zbx_mediatype.py deleted file mode 100644 index a49aecd0f..000000000 --- a/roles/os_zabbix/library/zbx_mediatype.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -''' - Ansible module for mediatype -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix mediatype ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True -def get_mtype(mtype): - ''' - Transport used by the media type. - Possible values: - 0 - email; - 1 - script; - 2 - SMS; - 3 - Jabber; - 100 - Ez Texting. - ''' - mtype = mtype.lower() - media_type = None - if mtype == 'script': - media_type = 1 - elif mtype == 'sms': - media_type = 2 - elif mtype == 'jabber': - media_type = 3 - elif mtype == 'script': - media_type = 100 - else: - media_type = 0 - - return media_type - -def main(): - ''' - Ansible zabbix module for mediatype - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - description=dict(default=None, type='str'), - mtype=dict(default=None, type='str'), - smtp_server=dict(default=None, type='str'), - smtp_helo=dict(default=None, type='str'), - smtp_email=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'mediatype' - idname = "mediatypeid" - description = module.params['description'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}}) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'description': description, - 'type': get_mtype(module.params['media_type']), - 'smtp_server': module.params['smtp_server'], - 'smtp_helo': module.params['smtp_helo'], - 'smtp_email': module.params['smtp_email'], - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if zab_results[key] != value and \ - zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. 
%s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_template.py b/roles/os_zabbix/library/zbx_template.py deleted file mode 100644 index 20ea48a85..000000000 --- a/roles/os_zabbix/library/zbx_template.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -''' -Ansible module for template -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix template ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def main(): - ''' Ansible module for template - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zbc = ZabbixConnection(module.params['server'], user, passwd, module.params['debug']) - zapi = ZabbixAPI(zbc) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'template' - idname = 'templateid' - tname = module.params['name'] - state = module.params['state'] - # get a template, see if it exists - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'host': tname}, - 'selectParentTemplates': 'templateid', - 'selectGroups': 'groupid', - 'selectApplications': 'applicationid', - 'selectDiscoveries': 'extend', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'groups': module.params.get('groups', [{'groupid': '1'}]), - 'host': tname, - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already 
exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'templates' and zab_results.has_key('parentTemplates'): - if zab_results['parentTemplates'] != value: - differences[key] = value - elif zab_results[key] != str(value) and zab_results[key] != value: - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=content['result'], state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_trigger.py b/roles/os_zabbix/library/zbx_trigger.py deleted file mode 100644 index 7cc9356c8..000000000 --- a/roles/os_zabbix/library/zbx_trigger.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/env python -''' -ansible module for zabbix triggers -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix trigger ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - - -def get_priority(priority): - ''' determine priority - ''' - prior = 0 - if 'info' in priority: - prior = 1 - elif 'warn' in priority: - prior = 2 - elif 'avg' == priority or 'ave' in priority: - prior = 3 - elif 'high' in priority: - prior = 4 - elif 'dis' in priority: - prior = 5 - - return prior - -def get_deps(zapi, deps): - ''' get trigger dependencies - ''' - results = [] - for desc in deps: - content = zapi.get_content('trigger', - 'get', - {'search': {'description': desc}, - 'expandExpression': True, - 'selectDependencies': 'triggerid', - }) - if content.has_key('result'): - results.append({'triggerid': content['result'][0]['triggerid']}) - - return results - -def main(): - ''' - Create a trigger in zabbix - - Example: - "params": { - "description": "Processor load is too high on {HOST.NAME}", - "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", - "dependencies": [ - { - "triggerid": "14062" - } - ] - }, - - ''' - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - expression=dict(default=None, type='str'), - description=dict(default=None, type='str'), - dependencies=dict(default=[], type='list'), - priority=dict(default='avg', type='str'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - #Set the instance and the template for the rest of the calls - zbx_class_name = 'trigger' - idname = "triggerid" - state = module.params['state'] - description = module.params['description'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'description': description}, - 'expandExpression': True, - 'selectDependencies': 'triggerid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'description': description, - 'expression': module.params['expression'], - 'dependencies': get_deps(zapi, module.params['dependencies']), - 'priority': get_priority(module.params['priority']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - - if zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, 
results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py deleted file mode 100644 index c45c9a75d..000000000 --- a/roles/os_zabbix/library/zbx_user.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env python -''' -ansible module for zabbix users -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix user ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. -# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_usergroups(zapi, usergroups): - ''' Get usergroups - ''' - ugroups = [] - for ugr in usergroups: - content = zapi.get_content('usergroup', - 'get', - {'search': {'name': ugr}, - #'selectUsers': 'userid', - #'getRights': 'extend' - }) - if content['result']: - ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) - - return ugroups or None - -def get_usertype(user_type): - ''' - Determine zabbix user account type - ''' - if not user_type: - return None - - utype = 1 - if 'super' in user_type: - utype = 3 - elif 'admin' in user_type or user_type == 'admin': - utype = 2 - - return utype - -def main(): - ''' - ansible zabbix module for users - ''' - - ##def user(self, name, state='present', params=None): - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - alias=dict(default=None, type='str'), - name=dict(default=None, type='str'), - surname=dict(default=None, type='str'), - user_type=dict(default=None, type='str'), - passwd=dict(default=None, type='str'), - usergroups=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - password = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = 
ZabbixAPI(ZabbixConnection(module.params['server'], user, password, module.params['debug'])) - - ## before we can create a user media and users with media types we need media - zbx_class_name = 'user' - idname = "userid" - alias = module.params['alias'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'output': 'extend', - 'search': {'alias': alias}, - "selectUsrgrps": 'usergrpid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'alias': alias, - 'passwd': module.params['passwd'], - 'usrgrps': get_usergroups(zapi, module.params['usergroups']), - 'name': module.params['name'], - 'surname': module.params['surname'], - 'type': get_usertype(module.params['user_type']), - } - - # Remove any None valued params - _ = [params.pop(key, None) for key in params.keys() if params[key] is None] - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'passwd': - differences[key] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/library/zbx_usergroup.py b/roles/os_zabbix/library/zbx_usergroup.py deleted file mode 100644 index ede4c9df1..000000000 --- a/roles/os_zabbix/library/zbx_usergroup.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python -''' -zabbix ansible module for usergroups -''' -# vim: expandtab:tabstop=4:shiftwidth=4 -# -# Zabbix usergroup ansible module -# -# -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is in place because each module looks similar to each other. -# These need duplicate code as their behavior is very similar -# but different for each zabbix class. 
-# pylint: disable=duplicate-code - -# pylint: disable=import-error -from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection - -def exists(content, key='result'): - ''' Check if key exists in content or the size of content[key] > 0 - ''' - if not content.has_key(key): - return False - - if not content[key]: - return False - - return True - -def get_rights(zapi, rights): - '''Get rights - ''' - perms = [] - for right in rights: - hstgrp = right.keys()[0] - perm = right.values()[0] - content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}}) - if content['result']: - permission = 0 - if perm == 'ro': - permission = 2 - elif perm == 'rw': - permission = 3 - perms.append({'id': content['result'][0]['groupid'], - 'permission': permission}) - return perms - -def get_userids(zapi, users): - ''' Get userids from user aliases - ''' - userids = [] - for alias in users: - content = zapi.get_content('user', 'get', {'search': {'alias': alias}}) - if content['result']: - userids.append(content['result'][0]['userid']) - - return userids - -def main(): - ''' Ansible module for usergroup - ''' - - ##def usergroup(self, name, rights=None, users=None, state='present', params=None): - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), - user=dict(default=None, type='str'), - password=dict(default=None, type='str'), - name=dict(default=None, type='str'), - rights=dict(default=[], type='list'), - users=dict(default=[], type='list'), - debug=dict(default=False, type='bool'), - state=dict(default='present', type='str'), - ), - #supports_check_mode=True - ) - - user = module.params.get('user', os.environ['ZABBIX_USER']) - passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD']) - - zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])) - - zbx_class_name = 'usergroup' - idname = "usrgrpid" - uname = module.params['name'] - state = module.params['state'] - - content = zapi.get_content(zbx_class_name, - 'get', - {'search': {'name': uname}, - 'selectUsers': 'userid', - }) - if state == 'list': - module.exit_json(changed=False, results=content['result'], state="list") - - if state == 'absent': - if not exists(content): - module.exit_json(changed=False, state="absent") - - content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) - module.exit_json(changed=True, results=content['result'], state="absent") - - if state == 'present': - params = {'name': uname, - 'rights': get_rights(zapi, module.params['rights']), - 'userids': get_userids(zapi, module.params['users']), - } - - if not exists(content): - # if we didn't find it, create it - content = zapi.get_content(zbx_class_name, 'create', params) - module.exit_json(changed=True, results=content['result'], state='present') - # already exists, we need to update it - # let's compare properties - differences = {} - zab_results = content['result'][0] - for key, value in params.items(): - if key == 'rights': - differences['rights'] = value - - elif key == 'userids' and zab_results.has_key('users'): - if zab_results['users'] != value: - differences['userids'] = value - - elif zab_results[key] != value and zab_results[key] != str(value): - differences[key] = value - - if not differences: - module.exit_json(changed=False, results=zab_results, state="present") - - # We have differences and need to update - differences[idname] = zab_results[idname] - content = 
zapi.get_content(zbx_class_name, 'update', differences) - module.exit_json(changed=True, results=content['result'], state="present") - - module.exit_json(failed=True, - changed=False, - results='Unknown state passed. %s' % state, - state="unknown") - -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled -# import module snippets. This are required -from ansible.module_utils.basic import * - -main() diff --git a/roles/os_zabbix/meta/main.yml b/roles/os_zabbix/meta/main.yml new file mode 100644 index 000000000..360f5aad2 --- /dev/null +++ b/roles/os_zabbix/meta/main.yml @@ -0,0 +1,9 @@ +--- +galaxy_info: + author: OpenShift + description: ZabbixAPI + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: +- lib_zabbix diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml new file mode 100644 index 000000000..7111c778b --- /dev/null +++ b/roles/os_zabbix/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: Main List all templates + zbx_template: + zbx_server: "{{ ozb_server }}" + zbx_user: "{{ ozb_user }}" + zbx_password: "{{ ozb_password }}" + state: list + register: templates + +- debug: var=templates + +- include_vars: template_heartbeat.yml +- include_vars: template_os_linux.yml + +- name: Include Template Heartbeat + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_heartbeat }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + +- name: Include Template os_linux + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_os_linux }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + diff --git a/roles/os_zabbix/vars/main.yml b/roles/os_zabbix/vars/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/os_zabbix/vars/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/os_zabbix/vars/template_heartbeat.yml b/roles/os_zabbix/vars/template_heartbeat.yml new file mode 100644 index 000000000..8389485d3 --- /dev/null +++ b/roles/os_zabbix/vars/template_heartbeat.yml @@ -0,0 +1,12 @@ +--- +g_template_heartbeat: + name: Template Heartbeat + zitems: + - name: Heartbeat Ping + applications: + - Heartbeat + key: heartbeat.ping + ztriggers: + - description: 'Heartbeat.ping has failed on {HOST.NAME}' + expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' + priority: avg diff --git a/roles/os_zabbix/vars/template_host.yml b/roles/os_zabbix/vars/template_host.yml new file mode 100644 index 000000000..e7cc667cb --- /dev/null +++ b/roles/os_zabbix/vars/template_host.yml @@ -0,0 +1,27 @@ +--- +g_template_host: + params: + name: Template Host + host: Template Host + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Host + zitems: + - name: Host Ping + hostid: + key_: host.ping + type: 2 + value_type: 0 + output: extend + search: + key_: host.ping + ztriggers: + - description: 'Host ping has failed on {HOST.NAME}' + expression: '{Template Host:host.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Host ping has failed on*' + expandExpression: True diff --git a/roles/os_zabbix/vars/template_master.yml b/roles/os_zabbix/vars/template_master.yml new file mode 100644 index 000000000..5f9b41a4f --- /dev/null +++ b/roles/os_zabbix/vars/template_master.yml @@ -0,0 +1,27 @@ +--- +g_template_master: + params: + name: Template Master + host: Template Master + groups: + - groupid: 1 # FIXME 
(not real) + output: extend + search: + name: Template Master + zitems: + - name: Master Etcd Ping + hostid: + key_: master.etcd.ping + type: 2 + value_type: 0 + output: extend + search: + key_: master.etcd.ping + ztriggers: + - description: 'Master Etcd ping has failed on {HOST.NAME}' + expression: '{Template Master:master.etcd.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Master Etcd ping has failed on*' + expandExpression: True diff --git a/roles/os_zabbix/vars/template_node.yml b/roles/os_zabbix/vars/template_node.yml new file mode 100644 index 000000000..98c343a24 --- /dev/null +++ b/roles/os_zabbix/vars/template_node.yml @@ -0,0 +1,27 @@ +--- +g_template_node: + params: + name: Template Node + host: Template Node + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Node + zitems: + - name: Kubelet Ping + hostid: + key_: kubelet.ping + type: 2 + value_type: 0 + output: extend + search: + key_: kubelet.ping + ztriggers: + - description: 'Kubelet ping has failed on {HOST.NAME}' + expression: '{Template Node:kubelet.ping.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Kubelet ping has failed on*' + expandExpression: True diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml new file mode 100644 index 000000000..1c9d10bb0 --- /dev/null +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -0,0 +1,173 @@ +--- +g_template_os_linux: + name: Template OS Linux + zitems: + - key: kernel.uname.sysname + applications: + - Kernel + value_type: string + + - key: kernel.all.cpu.wait.total + applications: + - Kernel + value_type: int + + - key: kernel.all.cpu.irq.hard + applications: + - Kernel + value_type: int + + - key: kernel.all.cpu.idle + applications: + - Kernel + value_type: int + + - key: kernel.uname.distro + applications: + - Kernel + value_type: string + + - key: kernel.uname.nodename + applications: + - Kernel + value_type: string + + - key: kernel.all.cpu.irq.soft + applications: + - Kernel + value_type: int + + - key: kernel.all.load.15_minute + applications: + - Kernel + value_type: float + + - key: kernel.all.cpu.sys + applications: + - Kernel + value_type: int + + - key: kernel.all.load.5_minute + applications: + - Kernel + value_type: float + + - key: mem.freemem + applications: + - Memory + value_type: int + + - key: kernel.all.cpu.nice + applications: + - Kernel + value_type: int + + - key: mem.util.bufmem + applications: + - Memory + value_type: int + + - key: swap.used + applications: + - Memory + value_type: int + + - key: kernel.all.load.1_minute + applications: + - Kernel + value_type: float + + - key: kernel.uname.version + applications: + - Kernel + value_type: string + + - key: swap.length + applications: + - Memory + value_type: int + + - key: mem.physmem + applications: + - Memory + value_type: int + + - key: kernel.all.uptime + applications: + - Kernel + value_type: int + + - key: swap.free + applications: + - Memory + value_type: int + + - key: mem.util.used + applications: + - Memory + value_type: int + + - key: kernel.all.cpu.user + applications: + - Kernel + value_type: int + + - key: kernel.uname.machine + applications: + - Kernel + value_type: string + + - key: hinv.ncpu + applications: + - Kernel + value_type: int + + - key: mem.util.cached + applications: + - Memory + value_type: int + + - key: kernel.all.cpu.steal + applications: + - Kernel + value_type: int + + - key: kernel.all.pswitch + applications: + 
- Kernel + value_type: int + + - key: kernel.uname.release + applications: + - Kernel + value_type: string + + - key: proc.nprocs + applications: + - Kernel + value_type: int + + - key: filesys.avail + applications: + - Disk + value_type: int + + - key: filesys.capacity + applications: + - Disk + value_type: int + + - key: filesys.free + applications: + - Disk + value_type: int + + - key: filesys.full + applications: + - Disk + value_type: float + + - key: filesys.used + applications: + - Disk + value_type: float diff --git a/roles/os_zabbix/vars/template_router.yml b/roles/os_zabbix/vars/template_router.yml new file mode 100644 index 000000000..4dae7da1e --- /dev/null +++ b/roles/os_zabbix/vars/template_router.yml @@ -0,0 +1,27 @@ +--- +g_template_router: + params: + name: Template Router + host: Template Router + groups: + - groupid: 1 # FIXME (not real) + output: extend + search: + name: Template Router + zitems: + - name: Router Backends down + hostid: + key_: router.backends.down + type: 2 + value_type: 0 + output: extend + search: + key_: router.backends.down + ztriggers: + - description: 'Number of router backends down on {HOST.NAME}' + expression: '{Template Router:router.backends.down.last()}<>0' + priority: 3 + searchWildcardsEnabled: True + search: + description: 'Number of router backends down on {HOST.NAME}' + expandExpression: True -- cgit v1.2.3 From f0d03d257f2186c91e99c06e34be737468ea6ad6 Mon Sep 17 00:00:00 2001 From: Troy Dawson Date: Thu, 27 Aug 2015 10:27:46 -0500 Subject: Add a role that allows logrotate config editing. This role gets called for each type of machine, but if logrotate_scripts is not set, nothing happens. --- .../openshift-cluster/tasks/launch_instances.yml | 17 ++++++ playbooks/common/openshift-etcd/config.yml | 1 + playbooks/common/openshift-master/config.yml | 1 + playbooks/common/openshift-node/config.yml | 1 + roles/nickhammond.logrotate/.travis.yml | 14 +++++ roles/nickhammond.logrotate/README.md | 71 ++++++++++++++++++++++ .../meta/.galaxy_install_info | 1 + roles/nickhammond.logrotate/meta/main.yml | 15 +++++ roles/nickhammond.logrotate/tasks/main.yml | 10 +++ .../nickhammond.logrotate/templates/logrotate.d.j2 | 16 +++++ roles/nickhammond.logrotate/tests/inventory | 1 + roles/nickhammond.logrotate/tests/test.yml | 18 ++++++ 12 files changed, 166 insertions(+) create mode 100644 roles/nickhammond.logrotate/.travis.yml create mode 100644 roles/nickhammond.logrotate/README.md create mode 100644 roles/nickhammond.logrotate/meta/.galaxy_install_info create mode 100644 roles/nickhammond.logrotate/meta/main.yml create mode 100644 roles/nickhammond.logrotate/tasks/main.yml create mode 100644 roles/nickhammond.logrotate/templates/logrotate.d.j2 create mode 100644 roles/nickhammond.logrotate/tests/inventory create mode 100644 roles/nickhammond.logrotate/tests/test.yml (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index e9ebc3e02..b77bcdc1a 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -159,6 +159,22 @@ type: "{{host_type}}" when: host_type != "node" +- set_fact: + logrotate: + - name: syslog + path: "/var/log/cron + \n/var/log/maillog + \n/var/log/messages + \n/var/log/secure + \n/var/log/spooler \n" + options: + - daily + - rotate 7 + - compress + - sharedscripts + scripts: + postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> 
/dev/null || true" + - name: Add new instances groups and variables add_host: hostname: "{{ item.0 }}" @@ -169,6 +185,7 @@ ec2_private_ip_address: "{{ item.1.private_ip }}" ec2_ip_address: "{{ item.1.public_ip }}" openshift_node_labels: "{{ node_label }}" + logrotate_scripts: "{{ logrotate }}" with_together: - instances - ec2.instances diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 3cc561ba0..952960652 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -85,6 +85,7 @@ when: etcd_server_certs_missing roles: - etcd + - role: nickhammond.logrotate - name: Delete temporary directory on localhost hosts: localhost diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index acf85fc04..ed40d4b89 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -199,6 +199,7 @@ when: master_certs_missing and 'oo_first_master' not in group_names roles: - openshift_master + - role: nickhammond.logrotate - role: fluentd_master when: openshift.common.use_fluentd | bool post_tasks: diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 705f7f223..e0060a9a3 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -99,6 +99,7 @@ when: certs_missing roles: - openshift_node + - role: nickhammond.logrotate - role: fluentd_node when: openshift.common.use_fluentd | bool tasks: diff --git a/roles/nickhammond.logrotate/.travis.yml b/roles/nickhammond.logrotate/.travis.yml new file mode 100644 index 000000000..161023def --- /dev/null +++ b/roles/nickhammond.logrotate/.travis.yml @@ -0,0 +1,14 @@ +--- +language: python +python: "2.7" +before_install: + - sudo apt-get update -qq + - sudo apt-get install -qq python-apt python-pycurl +install: + - pip install ansible +script: + - "printf '[defaults]\nroles_path = ../' > ansible.cfg" + - ansible-playbook -i tests/inventory --syntax-check tests/test.yml + - ansible-playbook -i tests/inventory --connection=local --sudo -vvvv tests/test.yml +notifications: + email: false diff --git a/roles/nickhammond.logrotate/README.md b/roles/nickhammond.logrotate/README.md new file mode 100644 index 000000000..602b5ef6c --- /dev/null +++ b/roles/nickhammond.logrotate/README.md @@ -0,0 +1,71 @@ +[![Build Status](https://travis-ci.org/nickhammond/ansible-logrotate.svg?branch=master)](https://travis-ci.org/nickhammond/ansible-logrotate) + +Role Name +======== + +Installs logrotate and provides an easy way to setup additional logrotate scripts by specifying a list of directives. + +Requirements +------------ + +None + +Role Variables +-------------- + +**logrotate_scripts**: A list of logrotate scripts and the directives to use for the rotation. + +* name - The name of the script that goes into /etc/logrotate.d/ +* path - Path to point logrotate to for the log rotation +* options - List of directives for logrotate, view the logrotate man page for specifics +* scripts - Dict of scripts for logrotate (see Example below) + +``` +logrotate_scripts: + - name: rails + path: "/srv/current/log/*.log" + options: + - weekly + - size 25M + - missingok + - compress + - delaycompress + - copytruncate +``` + +Dependencies +------------ + +None + +Example Playbook +------------------------- + +Setting up logrotate for additional Nginx logs, with postrotate script. 
+ +``` +logrotate_scripts: + - name: nginx + path: /var/log/nginx/*.log + options: + - weekly + - size 25M + - rotate 7 + - missingok + - compress + - delaycompress + - copytruncate + scripts: + postrotate: "[ -s /run/nginx.pid ] && kill USR1 `cat /run/nginx.pid`" + +``` + +License +------- + +BSD + +Author Information +------------------ + +Find [Nick Hammond]( http://www.nickhammond.com ) on [Twitter](http://twitter.com/nickhammond). diff --git a/roles/nickhammond.logrotate/meta/.galaxy_install_info b/roles/nickhammond.logrotate/meta/.galaxy_install_info new file mode 100644 index 000000000..0d76708c9 --- /dev/null +++ b/roles/nickhammond.logrotate/meta/.galaxy_install_info @@ -0,0 +1 @@ +{install_date: 'Thu Aug 27 15:26:31 2015', version: master} diff --git a/roles/nickhammond.logrotate/meta/main.yml b/roles/nickhammond.logrotate/meta/main.yml new file mode 100644 index 000000000..1717b6d3d --- /dev/null +++ b/roles/nickhammond.logrotate/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Nick Hammond + description: Role to configure logrotate scripts + license: BSD + min_ansible_version: 1.5 + platforms: + - name: Ubuntu + versions: + - lucid + - precise + - trusty + categories: + - system +dependencies: [] diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml new file mode 100644 index 000000000..fda23e05e --- /dev/null +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: nickhammond.logrotate | Install logrotate + action: "{{ansible_pkg_mgr}} pkg=logrotate state=present" + +- name: nickhammond.logrotate | Setup logrotate.d scripts + template: + src: logrotate.d.j2 + dest: /etc/logrotate.d/{{ item.name }} + with_items: logrotate_scripts + when: logrotate_scripts is defined diff --git a/roles/nickhammond.logrotate/templates/logrotate.d.j2 b/roles/nickhammond.logrotate/templates/logrotate.d.j2 new file mode 100644 index 000000000..6453be6b2 --- /dev/null +++ b/roles/nickhammond.logrotate/templates/logrotate.d.j2 @@ -0,0 +1,16 @@ +# {{ ansible_managed }} + +{{ item.path }} { + {% if item.options is defined -%} + {% for option in item.options -%} + {{ option }} + {% endfor -%} + {% endif %} + {%- if item.scripts is defined -%} + {%- for name, script in item.scripts.iteritems() -%} + {{ name }} + {{ script }} + endscript + {% endfor -%} + {% endif -%} +} diff --git a/roles/nickhammond.logrotate/tests/inventory b/roles/nickhammond.logrotate/tests/inventory new file mode 100644 index 000000000..2fbb50c4a --- /dev/null +++ b/roles/nickhammond.logrotate/tests/inventory @@ -0,0 +1 @@ +localhost diff --git a/roles/nickhammond.logrotate/tests/test.yml b/roles/nickhammond.logrotate/tests/test.yml new file mode 100644 index 000000000..e806b0a02 --- /dev/null +++ b/roles/nickhammond.logrotate/tests/test.yml @@ -0,0 +1,18 @@ +--- +- hosts: all + sudo: True + roles: + - ansible-logrotate + - role: ansible-logrotate + logrotate_scripts: + - name: nginx-options + path: /var/log/nginx/options.log + options: + - daily + + - role: ansible-logrotate + logrotate_scripts: + - name: nginx-scripts + path: /var/log/nginx/scripts.log + scripts: + postrotate: "echo test" -- cgit v1.2.3 From d565411ae9f2080c7c575744099fe5f79de2bb55 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 Aug 2015 21:32:17 -0400 Subject: adhoc/tutorial_reset: Don't error out if there are no Docker images I'd like this playbook to always work. 
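For reference, the failure-tolerant cleanup pattern this change applies is roughly the following minimal Ansible sketch; it is illustrative only, and the GNU xargs --no-run-if-empty flag shown here is an optional extra rather than something this commit adds:

    # Illustrative sketch: keep cleanup tasks from aborting the play when
    # there are no containers or images left for xargs to operate on.
    - shell: docker ps -a -q | xargs --no-run-if-empty docker stop
      changed_when: False   # cleanup only, never report a change
      failed_when: False    # an empty result (or stopped daemon) is not fatal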
--- playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 3 +++ 1 file changed, 3 insertions(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 1200caa2a..3e22f8f2d 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -61,12 +61,15 @@ - shell: docker ps -a -q | xargs docker stop changed_when: False + failed_when: False - shell: docker ps -a -q| xargs docker rm changed_when: False + failed_when: False - shell: docker images -q |xargs docker rmi changed_when: False + failed_when: False - file: path={{ item }} state=absent with_items: -- cgit v1.2.3 From 61ba47474f12fb83e9e40f2a1f0a47fd5d393457 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 Aug 2015 22:19:27 -0400 Subject: adhoc/tutorial_reset: Also delete etcd and data I needed this because I forgot to override openshift_hostname, and it found the wrong hostname, which then leaked into etcd certs, which caused the master to fail to start. --- playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 3e22f8f2d..54d3ea278 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -19,10 +19,12 @@ - openshift-node - atomic-enterprise-master - atomic-enterprise-node + - etcd - yum: name={{ item }} state=absent with_items: - openvswitch + - etcd - origin - origin-master - origin-node @@ -89,6 +91,8 @@ - /etc/sysconfig/openshift-node - /etc/sysconfig/atomic-enterprise-master - /etc/sysconfig/atomic-enterprise-node + - /etc/etcd + - /var/lib/etcd - user: name={{ item }} state=absent remove=yes with_items: -- cgit v1.2.3 From 05c5d6e1a0de2e7a5f5cb509b08981ba9b1ec69b Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 27 Aug 2015 17:45:14 -0400 Subject: Add cockpit-ws with cockpit-kubernetes plugin --- inventory/byo/hosts.example | 6 ++++++ playbooks/common/openshift-master/config.yml | 9 +++++++++ roles/cockpit/defaults/main.yml | 5 +++++ roles/cockpit/meta/main.yml | 15 +++++++++++++++ roles/cockpit/tasks/main.yml | 16 ++++++++++++++++ 5 files changed, 51 insertions(+) create mode 100644 roles/cockpit/defaults/main.yml create mode 100644 roles/cockpit/meta/main.yml create mode 100644 roles/cockpit/tasks/main.yml (limited to 'playbooks') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 2bbc4ca1e..1685003e8 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -44,6 +44,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure Fluentd #use_fluentd=true +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + # master cluster ha variables using pacemaker or RHEL HA #openshift_master_cluster_password=openshift_cluster #openshift_master_cluster_vip=192.168.133.25 diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index ed40d4b89..e8e3dcfdc 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -219,6 +219,15 @@ - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool +- name: Enable cockpit + hosts: oo_first_master + vars: + 
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" + roles: + - role: cockpit + when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and + (osm_use_cockpit | bool or osm_use_cockpit is undefined ) + # Additional instance config for online deployments - name: Additional instance config hosts: oo_masters_deployment_type_online diff --git a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml new file mode 100644 index 000000000..ffd55f1dd --- /dev/null +++ b/roles/cockpit/defaults/main.yml @@ -0,0 +1,5 @@ +--- +os_firewall_use_firewalld: false +os_firewall_allow: +- service: cockpit-ws + port: 9090/tcp diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml new file mode 100644 index 000000000..1e3948b19 --- /dev/null +++ b/roles/cockpit/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Scott Dodson + description: Deploy and Enable cockpit-ws plus optional plugins + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.7 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: + - { role: os_firewall } diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml new file mode 100644 index 000000000..875cbad21 --- /dev/null +++ b/roles/cockpit/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Install cockpit-ws + yum: + name: "{{ item }}" + state: present + with_items: + - cockpit-ws + - cockpit-shell + - cockpit-bridge + - "{{ cockpit_plugins }}" + +- name: Enable cockpit-ws + service: + name: cockpit.socket + enabled: true + state: started -- cgit v1.2.3 From 1f52ea8c4e2f8cfce51e98cb3614c61f0d78ec3e Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Fri, 28 Aug 2015 18:03:59 -0400 Subject: added docker zabbix template, removed unused / old templates so they don't confuse other people. 
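For reference, trigger dependencies in these template vars are referenced by the parent trigger's description, and the parent trigger has to be created before its dependents; a minimal sketch with hypothetical item and trigger names:

    # Minimal sketch (hypothetical names): the 'Low ...' trigger lists the
    # 'Critically low ...' trigger as a dependency, referenced by description,
    # so the critical trigger must appear (and be created) first.
    ztriggers:
    - description: 'Critically low example space on {HOST.NAME}'
      expression: '{Template Example:example.space.free.max(#3)}<5'
      priority: high

    - description: 'Low example space on {HOST.NAME}'
      expression: '{Template Example:example.space.free.max(#3)}<10'
      dependencies:
      - 'Critically low example space on {HOST.NAME}'
      priority: average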
--- playbooks/adhoc/zabbix_setup/filter_plugins | 1 + playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml | 7 +++ playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 13 ++++ playbooks/adhoc/zabbix_setup/roles | 1 + roles/lib_zabbix/library/zbx_trigger.py | 4 +- roles/lib_zabbix/tasks/create_template.yml | 36 +++++------ roles/os_zabbix/tasks/main.yml | 9 +++ roles/os_zabbix/vars/template_docker.yml | 83 +++++++++++++++++++++++++ roles/os_zabbix/vars/template_host.yml | 27 -------- roles/os_zabbix/vars/template_master.yml | 27 -------- roles/os_zabbix/vars/template_node.yml | 27 -------- roles/os_zabbix/vars/template_router.yml | 27 -------- 12 files changed, 133 insertions(+), 129 deletions(-) create mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins create mode 100755 playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml create mode 100755 playbooks/adhoc/zabbix_setup/oo-config-zaio.yml create mode 120000 playbooks/adhoc/zabbix_setup/roles create mode 100644 roles/os_zabbix/vars/template_docker.yml delete mode 100644 roles/os_zabbix/vars/template_host.yml delete mode 100644 roles/os_zabbix/vars/template_master.yml delete mode 100644 roles/os_zabbix/vars/template_node.yml delete mode 100644 roles/os_zabbix/vars/template_router.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/ \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml new file mode 100755 index 000000000..0fe65b338 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml @@ -0,0 +1,7 @@ +#!/usr/bin/env ansible-playbook +--- +- include: clean_zabbix.yml + vars: + g_server: http://localhost/zabbix/api_jsonrpc.php + g_user: Admin + g_password: zabbix diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml new file mode 100755 index 000000000..e2b8150c6 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml @@ -0,0 +1,13 @@ +#!/usr/bin/ansible-playbook +--- +- hosts: localhost + gather_facts: no + vars: + g_server: http://localhost/zabbix/api_jsonrpc.php + g_user: Admin + g_password: zabbix + roles: + - role: os_zabbix + ozb_server: "{{ g_server }}" + ozb_user: "{{ g_user }}" + ozb_password: "{{ g_password }}" diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py index 43f3d2677..c707a2f64 100644 --- a/roles/lib_zabbix/library/zbx_trigger.py +++ b/roles/lib_zabbix/library/zbx_trigger.py @@ -65,7 +65,7 @@ def get_deps(zapi, deps): for desc in deps: content = zapi.get_content('trigger', 'get', - {'search': {'description': desc}, + {'filter': {'description': desc}, 'expandExpression': True, 'selectDependencies': 'triggerid', }) @@ -119,7 +119,7 @@ def main(): content = zapi.get_content(zbx_class_name, 'get', - {'search': {'description': description}, + {'filter': {'description': description}, 'expandExpression': True, 'selectDependencies': 'triggerid', }) diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml index 
022ca52f2..444fd0a14 100644 --- a/roles/lib_zabbix/tasks/create_template.yml +++ b/roles/lib_zabbix/tasks/create_template.yml @@ -16,18 +16,21 @@ - debug: var=lzbx_applications -- name: Create Application - zbx_application: - zbx_server: "{{ server }}" - zbx_user: "{{ user }}" - zbx_password: "{{ password }}" - name: "{{ item }}" - template_name: "{{ template.name }}" - with_items: lzbx_applications - register: created_application - when: template.zitems is defined - -- debug: var=created_application +# +# twiest: This is commented out because it doesn't work correctly +# +#- name: Create Application +# zbx_application: +# zbx_server: "{{ server }}" +# zbx_user: "{{ user }}" +# zbx_password: "{{ password }}" +# name: "{{ item }}" +# template_name: "{{ template.name }}" +# with_items: lzbx_applications +# register: created_application +# when: template.zitems is defined + +#- debug: var=created_application - name: Create Items zbx_item: @@ -38,24 +41,19 @@ name: "{{ item.name | default(item.key, true) }}" value_type: "{{ item.value_type | default('int') }}" template_name: "{{ template.name }}" - applications: "{{ item.applications }}" +# applications: "{{ item.applications }}" with_items: template.zitems register: created_items when: template.zitems is defined -#- debug: var=ctp_created_items - - name: Create Triggers zbx_trigger: zbx_server: "{{ server }}" zbx_user: "{{ user }}" zbx_password: "{{ password }}" description: "{{ item.description }}" + dependencies: "{{ item.dependencies | default([], true) }}" expression: "{{ item.expression }}" priority: "{{ item.priority }}" with_items: template.ztriggers when: template.ztriggers is defined - -#- debug: var=ctp_created_triggers - - diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml index 7111c778b..5d6e67606 100644 --- a/roles/os_zabbix/tasks/main.yml +++ b/roles/os_zabbix/tasks/main.yml @@ -11,6 +11,7 @@ - include_vars: template_heartbeat.yml - include_vars: template_os_linux.yml +- include_vars: template_docker.yml - name: Include Template Heartbeat include: ../../lib_zabbix/tasks/create_template.yml @@ -28,3 +29,11 @@ user: "{{ ozb_user }}" password: "{{ ozb_password }}" +- name: Include Template docker + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_docker }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml new file mode 100644 index 000000000..eab497269 --- /dev/null +++ b/roles/os_zabbix/vars/template_docker.yml @@ -0,0 +1,83 @@ +--- +g_template_docker: + name: Template Docker + zitems: + - key: docker.ping + applications: + - Docker Daemon + value_type: int + + - key: docker.storage.is_loopback + applications: + - Docker Storage + value_type: int + + - key: docker.storage.data.space.total + applications: + - Docker Storage + value_type: float + + - key: docker.storage.data.space.used + applications: + - Docker Storage + value_type: float + + - key: docker.storage.data.space.available + applications: + - Docker Storage + value_type: float + + - key: docker.storage.data.space.percent_available + applications: + - Docker Storage + value_type: float + + - key: docker.storage.metadata.space.total + applications: + - Docker Storage + value_type: float + + - key: docker.storage.metadata.space.used + applications: + - Docker Storage + value_type: float + + - key: docker.storage.metadata.space.available + applications: + - Docker Storage + 
value_type: float + + - key: docker.storage.metadata.space.percent_available + applications: + - Docker Storage + value_type: float + ztriggers: + - description: 'docker.ping failed on {HOST.NAME}' + expression: '{Template Docker:docker.ping.max(#3)}<1' + priority: high + + - description: 'Docker storage is using LOOPBACK on {HOST.NAME}' + expression: '{Template Docker:docker.storage.is_loopback.last()}<>0' + priority: high + + - description: 'Critically low docker storage data space on {HOST.NAME}' + expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.data.space.available.max(#3)}<5' # < 5% or < 5GB + priority: high + + - description: 'Critically low docker storage metadata space on {HOST.NAME}' + expression: '{Template Docker:docker.storage.metadata.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.metadata.space.available.max(#3)}<0.1' # < 5% or < 100MB + priority: high + + # Put triggers that depend on other triggers here (deps must be created first) + - description: 'Low docker storage data space on {HOST.NAME}' + expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.data.space.available.max(#3)}<10' # < 10% or < 10GB + dependencies: + - 'Critically low docker storage data space on {HOST.NAME}' + priority: average + + - description: 'Low docker storage metadata space on {HOST.NAME}' + expression: '{Template Docker:docker.storage.metadata.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.metadata.space.available.max(#3)}<0.2' # < 10% or < 200MB + dependencies: + - 'Critically low docker storage metadata space on {HOST.NAME}' + priority: average + diff --git a/roles/os_zabbix/vars/template_host.yml b/roles/os_zabbix/vars/template_host.yml deleted file mode 100644 index e7cc667cb..000000000 --- a/roles/os_zabbix/vars/template_host.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_host: - params: - name: Template Host - host: Template Host - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Host - zitems: - - name: Host Ping - hostid: - key_: host.ping - type: 2 - value_type: 0 - output: extend - search: - key_: host.ping - ztriggers: - - description: 'Host ping has failed on {HOST.NAME}' - expression: '{Template Host:host.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Host ping has failed on*' - expandExpression: True diff --git a/roles/os_zabbix/vars/template_master.yml b/roles/os_zabbix/vars/template_master.yml deleted file mode 100644 index 5f9b41a4f..000000000 --- a/roles/os_zabbix/vars/template_master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_master: - params: - name: Template Master - host: Template Master - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Master - zitems: - - name: Master Etcd Ping - hostid: - key_: master.etcd.ping - type: 2 - value_type: 0 - output: extend - search: - key_: master.etcd.ping - ztriggers: - - description: 'Master Etcd ping has failed on {HOST.NAME}' - expression: '{Template Master:master.etcd.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Master Etcd ping has failed on*' - expandExpression: True diff --git a/roles/os_zabbix/vars/template_node.yml b/roles/os_zabbix/vars/template_node.yml deleted file mode 100644 index 98c343a24..000000000 --- a/roles/os_zabbix/vars/template_node.yml +++ /dev/null @@ -1,27 +0,0 
@@ ---- -g_template_node: - params: - name: Template Node - host: Template Node - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Node - zitems: - - name: Kubelet Ping - hostid: - key_: kubelet.ping - type: 2 - value_type: 0 - output: extend - search: - key_: kubelet.ping - ztriggers: - - description: 'Kubelet ping has failed on {HOST.NAME}' - expression: '{Template Node:kubelet.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Kubelet ping has failed on*' - expandExpression: True diff --git a/roles/os_zabbix/vars/template_router.yml b/roles/os_zabbix/vars/template_router.yml deleted file mode 100644 index 4dae7da1e..000000000 --- a/roles/os_zabbix/vars/template_router.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_router: - params: - name: Template Router - host: Template Router - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Router - zitems: - - name: Router Backends down - hostid: - key_: router.backends.down - type: 2 - value_type: 0 - output: extend - search: - key_: router.backends.down - ztriggers: - - description: 'Number of router backends down on {HOST.NAME}' - expression: '{Template Router:router.backends.down.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Number of router backends down on {HOST.NAME}' - expandExpression: True -- cgit v1.2.3 From 21596a2dd2ef0239592f9376223a993cab5e9944 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Mon, 24 Aug 2015 13:48:13 -0700 Subject: wait for ports instead of waiting for an arbitrary period of time --- playbooks/common/openshift-master/config.yml | 1 + roles/fluentd_master/tasks/main.yml | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index d7c4044e0..4a4a69f50 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -186,6 +186,7 @@ vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + embedded_etcd: "{{ openshift.master.embedded_etcd }}" pre_tasks: - name: Ensure certificate directory exists file: diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index d592dc306..69f8eceab 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -39,8 +39,13 @@ owner: 'td-agent' mode: 0444 -- name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes." 
- pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes|int * 5)) }} +- name: wait for etcd to start up + wait_for: port=4001 delay=10 + when: embedded_etcd | bool + +- name: wait for etcd peer to start up + wait_for: port=7001 delay=10 + when: embedded_etcd | bool - name: ensure td-agent is running service: -- cgit v1.2.3 From 8e3d2689c442762cdd5df08ca31721c5b17b2ee8 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 27 Aug 2015 12:45:06 -0700 Subject: added new node fact for schedulability --- playbooks/common/openshift-node/config.yml | 10 ++-------- roles/openshift_facts/library/openshift_facts.py | 18 ++++++++++++++++++ roles/openshift_manage_node/tasks/main.yml | 12 ++++-------- roles/openshift_node/tasks/main.yml | 1 + 4 files changed, 25 insertions(+), 16 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index ba2f40d55..a14ca8e11 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -20,6 +20,7 @@ local_facts: labels: "{{ openshift_node_labels | default(None) }}" annotations: "{{ openshift_node_annotations | default(None) }}" + schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" - name: Check status of node certificates stat: path: "{{ openshift.common.config_base }}/node/{{ item }}" @@ -124,21 +125,14 @@ - os_env_extras - os_env_extras_node -- name: Set scheduleability +- name: Set schedulability hosts: oo_first_master vars: openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) | oo_collect('openshift.common.hostname') }}" - openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([])) - | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" pre_tasks: - - set_fact: - openshift_scheduleable_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config'] | default([])) - | oo_collect('openshift.common.hostname') - | difference(openshift_unscheduleable_nodes) }}" roles: - openshift_manage_node diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index d4f38a7b4..23c74f61c 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -301,6 +301,23 @@ def set_fluentd_facts_if_unset(facts): facts['common']['use_fluentd'] = use_fluentd return facts +def set_node_schedulability(facts): + """ Set schedulable facts if not already present in facts dict + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the generated schedulable + facts if they were not already present + + """ + if 'node' in facts: + if 'schedulable' not in facts['node']: + if 'master' in facts: + facts['node']['schedulable'] = False + else: + facts['node']['schedulable'] = True + return facts + def set_metrics_facts_if_unset(facts): """ Set cluster metrics facts if not already present in facts dict dict: the facts dict updated with the generated cluster metrics facts if @@ -741,6 +758,7 @@ class OpenShiftFacts(object): facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_fluentd_facts_if_unset(facts) + facts = set_node_schedulability(facts) facts = set_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = 
set_sdn_facts_if_unset(facts) diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 74e702248..7c4f45ce6 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -7,15 +7,11 @@ delay: 5 with_items: openshift_nodes -- name: Handle unscheduleable node +- name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=false - with_items: openshift_unscheduleable_nodes - -- name: Handle scheduleable node - command: > - {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true - with_items: openshift_scheduleable_nodes + {{ openshift.common.admin_binary }} manage-node {{ item.openshift.common.hostname }} --schedulable={{ 'true' if item.openshift.node.schedulable | bool else 'false' }} + with_items: + - "{{ openshift_node_vars }}" - name: Label nodes command: > diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e56a666e9..11190ab72 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -29,6 +29,7 @@ portal_net: "{{ openshift_master_portal_net | default(None) }}" kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" + schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" - name: Install Node package yum: pkg={{ openshift.common.service_type }}-node state=present -- cgit v1.2.3 From 12227816bd4722b7982c6dced0b43f60dc82b4e1 Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Wed, 9 Sep 2015 09:37:28 -0400 Subject: Default masters to t2.medium instead of t2.small --- playbooks/aws/openshift-cluster/vars.online.int.yml | 2 +- playbooks/aws/openshift-cluster/vars.online.prod.yml | 2 +- playbooks/aws/openshift-cluster/vars.online.stage.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml index bb18e13b0..2e2f25ccd 100644 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: t2.small +ec2_master_instance_type: t2.medium ec2_master_security_groups: [ 'integration', 'integration-master' ] ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'integration', 'integration-infra' ] diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml index bbef9cc56..18a53e12e 100644 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: t2.small +ec2_master_instance_type: t2.medium ec2_master_security_groups: [ 'production', 'production-master' ] ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'production', 'production-infra' ] diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml index 9008a55ba..1f9ac4252 100644 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -3,7 +3,7 
@@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: t2.small +ec2_master_instance_type: t2.medium ec2_master_security_groups: [ 'stage', 'stage-master' ] ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'stage', 'stage-infra' ] -- cgit v1.2.3 From cdfd68f642c586d5d2bba7ff3c4721dc417717c1 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Tue, 8 Sep 2015 16:31:35 -0700 Subject: Create service accounts and set up scc rules --- playbooks/common/openshift-master/config.yml | 9 ++++++++ roles/openshift_serviceaccounts/tasks/main.yml | 26 ++++++++++++++++++++++ .../templates/serviceaccount.j2 | 4 ++++ 3 files changed, 39 insertions(+) create mode 100644 roles/openshift_serviceaccounts/tasks/main.yml create mode 100644 roles/openshift_serviceaccounts/templates/serviceaccount.j2 (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 4a4a69f50..64cf7a65b 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -236,3 +236,12 @@ tasks: - file: name={{ g_master_mktemp.stdout }} state=absent changed_when: False + +- name: Configure service accounts + hosts: oo_first_master + + vars: + accounts: ["router", "registry"] + + roles: + - openshift_serviceaccounts diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml new file mode 100644 index 000000000..9665d0a72 --- /dev/null +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -0,0 +1,26 @@ +- name: Create service account configs + template: + src: serviceaccount.j2 + dest: "/tmp/{{ item }}-serviceaccount.yaml" + with_items: accounts + +- name: Create {{ item }} service account + command: > + {{ openshift.common.client_binary }} create -f "/tmp/{{ item }}-serviceaccount.yaml" + with_items: accounts + register: _sa_result + failed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc != 0" + changed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc == 0" + +- name: Get current security context constraints + shell: "{{ openshift.common.client_binary }} get scc privileged -o yaml > /tmp/scc.yaml" + +- name: Add security context constraint for {{ item }} + lineinfile: + dest: /tmp/scc.yaml + line: "- system:serviceaccount:default:{{ item }}" + insertafter: "^users:$" + with_items: accounts + +- name: Apply new scc rules for service accounts + command: "{{ openshift.common.client_binary }} replace -f /tmp/scc.yaml" diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 new file mode 100644 index 000000000..931e249f9 --- /dev/null +++ b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ item }} -- cgit v1.2.3 From c1c8d6045e22a01e81f582bd4b80cc8fadf6e035 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Fri, 18 Sep 2015 10:54:39 -0400 Subject: added the docker loopback fixer script --- .../docker_loopback_to_lvm/docker-storage-setup | 2 + .../docker_loopback_to_direct_lvm.yml | 141 +++++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup create mode 100644 playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml (limited to 
'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup new file mode 100644 index 000000000..059058823 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup @@ -0,0 +1,2 @@ +DEVS=/dev/xvdb +VG=docker_vg diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml new file mode 100644 index 000000000..70c6e03dc --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -0,0 +1,141 @@ +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker) +# in AWS. This adds an additional EBS volume and creates the Volume Group on this EBS volume to use. +# +# To run: +# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +# export AWS_ACCESS_KEY_ID='XXXXX' +# export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. run the playbook: +# ansible-playbook -e 'cli_environment=' -e "cli_volume_size=30" -e docker_loopback_to_direct_lvm.yml.yml +# +# Notes: +# * By default this will do a 30GB volume. +# * iops are calculated by Disk Size * 30. e.g ( 30GB * 30) = 900 iops +# * This will remove /var/lib/docker! +# * You may need to re-deploy docker images after this is run (like monitoring) +# + +- name: Fix docker to have a provisioned iops drive + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + gather_facts: no + + vars: + cli_volume_type: io1 + cli_volume_size: 30 + cli_volume_iops: {{ 30 * cli_volume_size}} + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + - cli_volume_size + + - debug: + var: hosts + + - name: start docker + service: + name: docker + state: started + + - name: Determine if loopback + shell: docker info | grep 'Data file:.*loop' + register: loop_device_check + ignore_errors: yes + + - debug: + var: loop_device_check + + - name: fail if we don't detect loopback + fail: + msg: loopback not detected! Please investigate manually. + when: loop_device_check.rc == 1 + + - name: stop zagg client monitoring container + service: + name: oso-rhel7-zagg-client + state: stopped + ignore_errors: yes + + - name: stop pcp client monitoring container + service: + name: oso-f22-host-monitoring + state: stopped + ignore_errors: yes + + - name: stop docker + service: + name: docker + state: stopped + + - name: delete /var/lib/docker + command: rm -rf /var/lib/docker + + - name: remove /var/lib/docker + command: rm -rf /var/lib/docker + + - name: check to see if /dev/xvdb exists + command: test -e /dev/xvdb + register: xvdb_check + ignore_errors: yes + + - debug: var=xvdb_check + + - name: fail if /dev/xvdb already exists + fail: + msg: /dev/xvdb already exists. 
Please investigate + when: xvdb_check.rc == 0 + + - name: Create a volume and attach it + delegate_to: localhost + ec2_vol: + state: present + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + volume_size: "{{ cli_volume_size | default(30, True)}}" + volume_type: "{{ cli_volume_type }}" + device_name: /dev/xvdb + iops: "{{ 30 * cli_volume_size }}" + register: vol + + - debug: var=vol + + - name: tag the vol with a name + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }} + args: + tags: + Name: "{{ ec2_tag_Name }}" + env: "{{ ec2_tag_environment }}" + register: voltags + + - name: Wait for volume to attach + pause: + seconds: 30 + + - name: copy the docker-storage-setup config file + copy: + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0664 + + - name: docker storage setup + command: docker-storage-setup + register: setup_output + + - debug: var=setup_output + + + - name: start docker + command: systemctl start docker.service + register: dockerstart + + - debug: var=dockerstart -- cgit v1.2.3 From 246fa73c71387f0c44d1689907416ca5da5bba2f Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Fri, 18 Sep 2015 11:16:52 -0400 Subject: cleaned up some errors in loopback playbook --- .../adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index 70c6e03dc..74cc9f628 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -8,7 +8,10 @@ # export AWS_SECRET_ACCESS_KEY='XXXXXX' # # 2. run the playbook: -# ansible-playbook -e 'cli_environment=' -e "cli_volume_size=30" -e docker_loopback_to_direct_lvm.yml.yml +# ansible-playbook -e 'cli_tag_name=' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# +# Example: +# ansible-playbook -e 'cli_tag_name=ops-master-f58e0' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml # # Notes: # * By default this will do a 30GB volume. 
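Once docker-storage-setup has consumed the DEVS/VG overrides shown earlier, the daemon should report a devicemapper thin pool instead of a loopback data file. A minimal follow-up check along the lines below (a sketch; the task name and registered variable are hypothetical and not part of the playbook above) could be appended after docker is started again:

  - name: Re-check that docker is no longer using a loopback data file
    # grep exits 0 only if "docker info" still reports a loopback data file
    shell: docker info | grep 'Data file:.*loop'
    register: loopback_recheck
    failed_when: loopback_recheck.rc == 0
    changed_when: false

This simply inverts the loopback detection used at the start of the playbook, so the run fails loudly if the conversion did not take effect.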
@@ -26,7 +29,7 @@ vars: cli_volume_type: io1 cli_volume_size: 30 - cli_volume_iops: {{ 30 * cli_volume_size}} + cli_volume_iops: "{{ 30 * cli_volume_size }}" pre_tasks: - fail: -- cgit v1.2.3 From 92cc48330ed171171c6a370644a4778727018fad Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Fri, 18 Sep 2015 12:14:39 -0400 Subject: added playbook for docker storage cleanup --- .../docker_loopback_to_direct_lvm.yml | 11 +++- .../docker_storage_cleanup.yml | 69 ++++++++++++++++++++++ 2 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index 74cc9f628..c9ae923bb 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -11,7 +11,7 @@ # ansible-playbook -e 'cli_tag_name=' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml # # Example: -# ansible-playbook -e 'cli_tag_name=ops-master-f58e0' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml # # Notes: # * By default this will do a 30GB volume. @@ -136,9 +136,16 @@ - debug: var=setup_output - - name: start docker command: systemctl start docker.service register: dockerstart - debug: var=dockerstart + + - name: Wait for docker to stabilize + pause: + seconds: 30 + + # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support + - name: update zabbix docker items + command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml new file mode 100644 index 000000000..1946a5f4f --- /dev/null +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -0,0 +1,69 @@ +--- +# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues. +# +# To run: +# +# 1. run the playbook: +# +# ansible-playbook -e 'cli_tag_name=' docker_storage_cleanup.yml +# +# Example: +# +# ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml +# +# Notes: +# * This *should* not interfere with running docker images +# + +- name: Clean up Docker Storage + gather_facts: no + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + + pre_tasks: + + - fail: + msg: "This playbook requires {{item}} to be set." 
+ when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + + - name: Ensure docker is running + service: + name: docker + state: started + enabled: yes + + - name: Get docker info + command: docker info + register: docker_info + + - name: Show docker info + debug: + var: docker_info.stdout_lines + + - name: Remove exited and dead containers + shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm" + ignore_errors: yes + + - name: Remove dangling docker images + shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" + ignore_errors: yes + + - name: Remove non-running docker images + shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" + ignore_errors: yes + + # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support + - name: update zabbix docker items + command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py + + # Get and show docker info again. + - name: Get docker info + command: docker info + register: docker_info + + - name: Show docker info + debug: + var: docker_info.stdout_lines -- cgit v1.2.3 From d02e346a12c356bd87c0e233d22db03791732841 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Mon, 21 Sep 2015 10:47:52 -0400 Subject: commented out dangerous playbook option for cleanup --- playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index 1946a5f4f..53a5c15ef 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -51,9 +51,10 @@ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" ignore_errors: yes - - name: Remove non-running docker images - shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" - ignore_errors: yes +# mwoodson & twiest: this is dangerous, commenting out for now. +# - name: Remove non-running docker images +# shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" +# ignore_errors: yes # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support - name: update zabbix docker items -- cgit v1.2.3 From 15fef0ed1619709446d7dd0b61d198cc650f53cc Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Mon, 21 Sep 2015 12:51:56 -0400 Subject: changed the docker cleanup to exclude certain registries --- playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index 53a5c15ef..a19291a9f 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -51,10 +51,9 @@ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" ignore_errors: yes -# mwoodson & twiest: this is dangerous, commenting out for now. 
-# - name: Remove non-running docker images -# shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" -# ignore_errors: yes + - name: Remove non-running docker images + shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null" + ignore_errors: yes # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support - name: update zabbix docker items -- cgit v1.2.3 From 44f2904159c5a3e0045eb413287a9c1778f91adb Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 10 Sep 2015 10:27:35 -0400 Subject: Upgrades --- playbooks/adhoc/upgrades/README.md | 21 +++++ playbooks/adhoc/upgrades/filter_plugins | 1 + playbooks/adhoc/upgrades/lookup_plugins | 1 + playbooks/adhoc/upgrades/roles | 1 + playbooks/adhoc/upgrades/upgrade.yml | 115 +++++++++++++++++++++++++ roles/etcd/tasks/main.yml | 1 + roles/etcd_ca/tasks/main.yml | 1 + roles/fluentd_master/tasks/main.yml | 1 - roles/openshift_examples/defaults/main.yml | 2 + roles/openshift_examples/tasks/main.yml | 12 +-- roles/openshift_master/tasks/main.yml | 2 + roles/openshift_node/tasks/main.yml | 1 + roles/openshift_serviceaccounts/tasks/main.yml | 2 +- 13 files changed, 153 insertions(+), 8 deletions(-) create mode 100644 playbooks/adhoc/upgrades/README.md create mode 120000 playbooks/adhoc/upgrades/filter_plugins create mode 120000 playbooks/adhoc/upgrades/lookup_plugins create mode 120000 playbooks/adhoc/upgrades/roles create mode 100644 playbooks/adhoc/upgrades/upgrade.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md new file mode 100644 index 000000000..6de8a970f --- /dev/null +++ b/playbooks/adhoc/upgrades/README.md @@ -0,0 +1,21 @@ +# [NOTE] +This playbook will re-run installation steps overwriting any local +modifications. You should ensure that your inventory has been updated with any +modifications you've made after your initial installation. If you find any items +that cannot be configured via ansible please open an issue at +https://github.com/openshift/openshift-ansible + +# Overview +This playbook is available as a technical preview. It currently performs the +following steps. 
+ + * Upgrade and restart master services + * Upgrade and restart node services + * Applies latest configuration by re-running the installation playbook + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +# Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/adhoc/upgrades/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/ \ No newline at end of file diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins new file mode 120000 index 000000000..73cafffe5 --- /dev/null +++ b/playbooks/adhoc/upgrades/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins/ \ No newline at end of file diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/adhoc/upgrades/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml new file mode 100644 index 000000000..e666f0472 --- /dev/null +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -0,0 +1,115 @@ +--- +- name: Re-Run cluster configuration to apply latest configuration changes + include: ../../common/openshift-cluster/config.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" + +- name: Upgrade masters + hosts: masters + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade master packages + yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest + - name: Restart master services + service: name="{{ openshift.common.service_type}}-master" state=restarted + +- name: Upgrade nodes + hosts: nodes + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade node packages + yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest + - name: Restart node services + service: name="{{ openshift.common.service_type }}-node" state=restarted + +- name: Determine new master version + hosts: oo_first_master + tasks: + - name: Determine new version + command: > + rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master + register: _new_version + +- name: Ensure AOS 3.0.2 or Origin 1.0.6 + hosts: oo_first_master + tasks: + fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later + when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2) + +- name: Update cluster policy + hosts: oo_first_master + tasks: + - name: oadm policy reconcile-cluster-roles --confirm + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + +- name: Upgrade default router + hosts: oo_first_master + vars: + - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" + - oc_cmd: "{{ openshift.common.client_binary }} --config={{ 
openshift.common.config_base }}/master/admin.kubeconfig" + tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: false + - name: Check for allowHostNetwork and allowHostPorts + when: _default_router.rc == 0 + shell: > + {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork + register: _scc + - name: Grant allowHostNetwork and allowHostPorts + when: + - _default_router.rc == 0 + - "'false' in _scc.stdout" + command: > + {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9 + - name: Update deployment config to 1.0.4/3.0.1 spec + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}' + - name: Switch to hostNetwork=true + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + +- name: Upgrade default + hosts: oo_first_master + vars: + - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" + - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + tasks: + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + +- name: Update image streams and templates + hosts: oo_first_master + vars: + openshift_examples_import_command: "update" + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_examples diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 27bfb7de9..656901409 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -38,6 +38,7 @@ template: src: etcd.conf.j2 dest: /etc/etcd/etcd.conf + backup: true notify: - restart etcd diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml index 8a266f732..625756867 100644 --- a/roles/etcd_ca/tasks/main.yml +++ b/roles/etcd_ca/tasks/main.yml @@ -18,6 +18,7 @@ - template: dest: "{{ etcd_ca_dir }}/fragments/openssl_append.cnf" src: openssl_append.j2 + backup: true - assemble: src: "{{ etcd_ca_dir }}/fragments" diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index 69f8eceab..55cd94460 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -52,4 +52,3 @@ name: 'td-agent' state: started enabled: yes - diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 3246790aa..7d4f100e3 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -14,3 +14,5 @@ db_templates_base: "{{ examples_base }}/db-templates" xpaas_image_streams: "{{ examples_base 
}}/xpaas-streams/jboss-image-streams.json" xpaas_templates_base: "{{ examples_base }}/xpaas-templates" quickstarts_base: "{{ examples_base }}/quickstart-templates" + +openshift_examples_import_command: "create" diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index bfc6dfb0a..3a829a4c6 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -7,7 +7,7 @@ # RHEL and Centos image streams are mutually exclusive - name: Import RHEL streams command: > - {{ openshift.common.client_binary }} create -n openshift -f {{ rhel_image_streams }} + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ rhel_image_streams }} when: openshift_examples_load_rhel register: oex_import_rhel_streams failed_when: "'already exists' not in oex_import_rhel_streams.stderr and oex_import_rhel_streams.rc != 0" @@ -15,7 +15,7 @@ - name: Import Centos Image streams command: > - {{ openshift.common.client_binary }} create -n openshift -f {{ centos_image_streams }} + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ centos_image_streams }} when: openshift_examples_load_centos | bool register: oex_import_centos_streams failed_when: "'already exists' not in oex_import_centos_streams.stderr and oex_import_centos_streams.rc != 0" @@ -23,7 +23,7 @@ - name: Import db templates command: > - {{ openshift.common.client_binary }} create -n openshift -f {{ db_templates_base }} + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ db_templates_base }} when: openshift_examples_load_db_templates | bool register: oex_import_db_templates failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0" @@ -31,7 +31,7 @@ - name: Import quickstart-templates command: > - {{ openshift.common.client_binary }} create -n openshift -f {{ quickstarts_base }} + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }} when: openshift_examples_load_quickstarts register: oex_import_quickstarts failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" @@ -40,7 +40,7 @@ - name: Import xPaas image streams command: > - {{ openshift.common.client_binary }} create -n openshift -f {{ xpaas_image_streams }} + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_image_streams }} when: openshift_examples_load_xpaas | bool register: oex_import_xpaas_streams failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0" @@ -48,7 +48,7 @@ - name: Import xPaas templates command: > - {{ openshift.common.client_binary }} create -n openshift -f {{ xpaas_templates_base }} + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_templates_base }} when: openshift_examples_load_xpaas | bool register: oex_import_xpaas_templates failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index b57711b58..fa12005ab 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -100,6 +100,7 @@ template: dest: "{{ openshift_master_scheduler_conf }}" src: scheduler.json.j2 + backup: true notify: - 
restart master @@ -129,6 +130,7 @@ template: dest: "{{ openshift_master_config_file }}" src: master.yaml.v1.j2 + backup: true notify: - restart master diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 1986b631e..e8cc499c0 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -47,6 +47,7 @@ template: dest: "{{ openshift_node_config_file }}" src: node.yaml.v1.j2 + backup: true notify: - restart node diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index 9665d0a72..d93a25a21 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -23,4 +23,4 @@ with_items: accounts - name: Apply new scc rules for service accounts - command: "{{ openshift.common.client_binary }} replace -f /tmp/scc.yaml" + command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml" -- cgit v1.2.3 From 6c6635df9d2fb57b1e70bfc63b7301b7e7c28d72 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 17 Sep 2015 15:01:51 -0700 Subject: Added S3 docker-registry config script --- playbooks/adhoc/s3_registry/s3_registry.j2 | 20 +++++++++++ playbooks/adhoc/s3_registry/s3_registry.yml | 55 +++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 playbooks/adhoc/s3_registry/s3_registry.j2 create mode 100644 playbooks/adhoc/s3_registry/s3_registry.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 new file mode 100644 index 000000000..eb8660f6c --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -0,0 +1,20 @@ +version: 0.1 +log: + level: debug +http: + addr: :5000 +storage: + cache: + layerinfo: inmemory + s3: + accesskey: {{ accesskey }} + secretkey: {{ secretkey }} + region: us-east-1 + bucket: {{ bucketname }} + encrypt: true + secure: true + v4auth: true + rootdirectory: /registry +middleware: + repository: + - name: openshift diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml new file mode 100644 index 000000000..61280df0b --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -0,0 +1,55 @@ +--- +# This playbook creates an S3 bucket, if it doesn't already exist, and configures the docker registry service to use the bucket as its backend storage. +# Usage: +# ansible-playbook s3_registry.yml -e bucketname="mybucket" -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e master="master fqdn or IP" -i "master," +# +# Example: +# ansible-playbook s3_registry.yml -e accesskey="asdf" -e secretkey="hjkl" -e bucketname="testbucket" -e master="54.173.148.238" -i "54.173.148.238," +# +# The bucket name can be anything, but generally should correspond with your cluster name. +# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. +# The 'master' param is the fqdn or public IP of your cluster's master. +# The -i param allows this playbook to be run on your master, even if it's not yet in your main inventory file. (The comma is mandatory). 
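The play that follows assumes a default docker-registry deployment already exists on the master; a small guard task such as the sketch below (hypothetical, not part of this file) could run before the secret handling so the playbook fails fast when the registry has not been created yet:

  - name: Check that the default docker-registry deployment exists
    # "oc get" returns non-zero when the deploymentconfig is missing, which stops the play early
    command: oc get dc/docker-registry -n default
    register: registry_dc_check
    changed_when: false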
+ +- hosts: "{{ master }}" + remote_user: root + gather_facts: False + + tasks: + + - name: Create S3 bucket + local_action: + module: s3 bucket={{ bucketname|quote }} mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }} + + - name: Generate docker registry config + template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 + + - name: Determine if new secrets are needed + command: oc get secrets + register: secrets + + - name: Create registry secrets + command: oc secrets new dockerregistry /root/config.yml + when: "'dockerregistry' not in secrets.stdout" + + - name: Determine if service account contains secrets + command: oc describe serviceaccount/registry + register: serviceaccount + + - name: Add secrets to registry service account + command: oc secrets add serviceaccount/registry secrets/dockerregistry + when: "'dockerregistry' not in serviceaccount.stdout" + + - name: Determine if deployment config contains secrets + command: oc volume dc/docker-registry --list + register: dc + + - name: Add secrets to registry deployment config + command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry + when: "'dockersecrets' not in dc.stdout" + + - name: Scale up registry + command: oc scale --replicas=1 dc/docker-registry + + - name: Delete temporary config file + file: path=/root/config.yml state=absent -- cgit v1.2.3 From 9deff4bd696168111316dc366c1b193e02e08c8b Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 24 Sep 2015 11:56:30 -0700 Subject: added dynamic inventory support for single-master clusters --- playbooks/adhoc/s3_registry/s3_registry.j2 | 2 +- playbooks/adhoc/s3_registry/s3_registry.yml | 15 +++++---------- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 index eb8660f6c..026b24456 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.j2 +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -10,7 +10,7 @@ storage: accesskey: {{ accesskey }} secretkey: {{ secretkey }} region: us-east-1 - bucket: {{ bucketname }} + bucket: {{ clusterid }}-docker encrypt: true secure: true v4auth: true diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 61280df0b..30b873db3 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -1,17 +1,12 @@ --- -# This playbook creates an S3 bucket, if it doesn't already exist, and configures the docker registry service to use the bucket as its backend storage. +# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage. # Usage: -# ansible-playbook s3_registry.yml -e bucketname="mybucket" -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e master="master fqdn or IP" -i "master," +# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster" # -# Example: -# ansible-playbook s3_registry.yml -e accesskey="asdf" -e secretkey="hjkl" -e bucketname="testbucket" -e master="54.173.148.238" -i "54.173.148.238," -# -# The bucket name can be anything, but generally should correspond with your cluster name. # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. 
-# The 'master' param is the fqdn or public IP of your cluster's master. -# The -i param allows this playbook to be run on your master, even if it's not yet in your main inventory file. (The comma is mandatory). +# The 'clusterid' is the short name of your cluster. -- hosts: "{{ master }}" +- hosts: security_group_{{ clusterid }}_master remote_user: root gather_facts: False @@ -19,7 +14,7 @@ - name: Create S3 bucket local_action: - module: s3 bucket={{ bucketname|quote }} mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }} + module: s3 bucket="{{ clusterid }}-docker" mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }} - name: Generate docker registry config template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 -- cgit v1.2.3 From 2a4b5b7322c8b0c8e84aae43d1ff411259bf9b61 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Tue, 29 Sep 2015 11:16:25 -0400 Subject: added the grow_docker_vg adhoc playbook --- .../grow_docker_vg/filter_plugins/oo_filters.py | 41 +++++ playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 204 +++++++++++++++++++++ 2 files changed, 245 insertions(+) create mode 100644 playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py create mode 100644 playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py new file mode 100644 index 000000000..d0264cde9 --- /dev/null +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +''' +Custom filters for use in openshift-ansible +''' + +import pdb + + +class FilterModule(object): + ''' Custom ansible filters ''' + + @staticmethod + def oo_pdb(arg): + ''' This pops you into a pdb instance where arg is the data passed in + from the filter. + Ex: "{{ hostvars | oo_pdb }}" + ''' + pdb.set_trace() + return arg + + @staticmethod + def translate_volume_name(volumes, target_volume): + ''' + This filter matches a device string /dev/sdX to /dev/xvdX + It will then return the AWS volume ID + ''' + for vol in volumes: + translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd") + if target_volume.startswith(translated_name): + return vol["id"] + + return None + + + def filters(self): + ''' returns a mapping of filters to methods ''' + return { + "translate_volume_name": self.translate_volume_name, + } diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml new file mode 100644 index 000000000..a88553ac0 --- /dev/null +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -0,0 +1,204 @@ +--- +# This playbook grows the docker VG on a node by: +# * add a new volume +# * add volume to the existing VG. +# * pv move to the new volume. +# * remove old volume +# * detach volume +# * mark old volume in AWS with "REMOVE ME" tag +# * grow docker LVM to 90% of the VG +# +# To run: +# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +# export AWS_ACCESS_KEY_ID='XXXXX' +# export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. run the playbook: +# ansible-playbook -e 'cli_tag_name=' grow_docker_vg.yml +# +# Example: +# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml +# +# Notes: +# * By default this will do a 55GB GP2 volume. 
The can be overidden with the "-e 'cli_volume_size=100'" variable +# * This does a GP2 by default. Support for Provisioned IOPS has not been added +# * This will assign the new volume to /dev/xvdc. This is not variablized, yet. +# * This can be done with NO downtime on the host +# + +- name: Grow the docker volume group + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + gather_facts: no + + vars: + cli_volume_type: gp2 + cli_volume_size: 55 +# cli_volume_iops: "{{ 30 * cli_volume_size }}" + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + - cli_volume_size + + - debug: + var: hosts + + - name: start docker + service: + name: docker + state: started + + - name: Determine if Storage Driver (docker info) is devicemapper + shell: docker info | grep 'Storage Driver:.*devicemapper' + register: device_mapper_check + ignore_errors: yes + + - debug: + var: device_mapper_check + + - name: fail if we don't detect devicemapper + fail: + msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually. + when: device_mapper_check.rc == 1 + + # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test + # and find the volume group. + - name: Attempt to find the Volume Group that docker is using + shell: lvs | grep docker-pool | awk '{print $2}' + register: docker_vg_name + ignore_errors: yes + + - debug: + var: docker_vg_name + + - name: fail if we don't find a docker volume group + fail: + msg: Unable to find docker volume group. Please investigate manually. + when: docker_vg_name.stdout_lines|length != 1 + + # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test + # and find the physical volume. + - name: Attempt to find the Phyisical Volume that docker is using + shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'" + register: docker_pv_name + ignore_errors: yes + + - debug: + var: docker_pv_name + + - name: fail if we don't find a docker physical volume + fail: + msg: Unable to find docker physical volume. Please investigate manually. + when: docker_pv_name.stdout_lines|length != 1 + + + - name: get list of volumes from AWS + delegate_to: localhost + ec2_vol: + state: list + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + register: attached_volumes + + - debug: var=attached_volumes + + - name: get volume id of current docker volume + set_fact: + old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}" + + - debug: var=old_docker_volume_id + + - name: check to see if /dev/xvdc exists + command: test -e /dev/xvdc + register: xvdc_check + ignore_errors: yes + + - debug: var=xvdc_check + + - name: fail if /dev/xvdc already exists + fail: + msg: /dev/xvdc already exists. 
Please investigate + when: xvdc_check.rc == 0 + + - name: Create a volume and attach it + delegate_to: localhost + ec2_vol: + state: present + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + volume_size: "{{ cli_volume_size | default(30, True)}}" + volume_type: "{{ cli_volume_type }}" + device_name: /dev/xvdc + register: create_volume + + - debug: var=create_volume + + - name: Fail when problems creating volumes and attaching + fail: + msg: "Failed to create or attach volume msg: {{ create_volume.msg }}" + when: create_volume.msg is defined + + - name: tag the vol with a name + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }} + args: + tags: + Name: "{{ ec2_tag_Name }}" + env: "{{ ec2_tag_environment }}" + register: voltags + + - name: check for attached drive + command: test -b /dev/xvdc + register: attachment_check + until: attachment_check.rc == 0 + retries: 30 + delay: 2 + + - name: partition the new drive and make it lvm + command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm + + - name: pvcreate /dev/xvdc + command: pvcreate /dev/xvdc1 + + - name: Extend the docker volume group + command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1 + + - name: pvmove onto new volume + command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1" + async: 3600 + poll: 10 + + - name: Remove the old docker drive from the volume group + command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}" + + - name: Remove the pv from the old drive + command: "pvremove {{ docker_pv_name.stdout }}" + + - name: Extend the docker lvm + command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool" + + - name: detach old docker volume + delegate_to: localhost + ec2_vol: + region: "{{ ec2_region }}" + id: "{{ old_docker_volume_id }}" + instance: None + + - name: tag the old vol valid label + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}} + args: + tags: + Name: "{{ ec2_tag_Name }} REMOVE ME" + register: voltags + + - name: Update the /etc/sysconfig/docker-storage-setup with new device + lineinfile: + dest: /etc/sysconfig/docker-storage-setup + regexp: ^DEVS= + line: DEVS=/dev/xvdc -- cgit v1.2.3 From 11f0f570243d39bb2e96bfd64ef9d180163c5c38 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Tue, 29 Sep 2015 11:40:42 -0400 Subject: added comment to the grow_docker_vg playbook --- playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 2 ++ 1 file changed, 2 insertions(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index a88553ac0..ef9b45abd 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -24,6 +24,8 @@ # * This does a GP2 by default. Support for Provisioned IOPS has not been added # * This will assign the new volume to /dev/xvdc. This is not variablized, yet. # * This can be done with NO downtime on the host +# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is +# the LV that gets created via the "docker-storage-setup" command # - name: Grow the docker volume group -- cgit v1.2.3 From 832bc1f47fce1b61f23259502c08f9253656cb38 Mon Sep 17 00:00:00 2001 From: Jaroslav Henner Date: Wed, 30 Sep 2015 20:51:35 +0200 Subject: Prevent dns resolution recursion (loop). 
dnsmasq should not resolve example.com recursively: when /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf contains server=/example.com/192.168.55.1, dnsmasq ends up querying itself, creating a DNS resolution loop. This causes "Maximum number of concurrent DNS queries reached (max: 150)" and degrades DNS resolution on the whole hypervisor and its guests. This patch fixes that in the libvirt network XML template (network.xml), which adds local=/example.com/ to /var/lib/libvirt/dnsmasq/openshift-ansible.conf and effectively fixes the problem. --- playbooks/libvirt/openshift-cluster/templates/network.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml index 86dcd62bb..050bc7ab9 100644 --- a/playbooks/libvirt/openshift-cluster/templates/network.xml +++ b/playbooks/libvirt/openshift-cluster/templates/network.xml @@ -8,7 +8,7 @@ - + -- cgit v1.2.3 From a22fbd327ab9decda9543d47c1ba375b9faecffd Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Tue, 18 Aug 2015 10:46:23 +0200 Subject: GCE-support (more information in PR, README_GCE.md) --- README_GCE.md | 17 +++++- bin/cluster | 12 ++-- inventory/gce/hosts/gce.py | 14 ++++- inventory/openstack/hosts/nova.py | 2 +- .../set_infra_launch_facts_tasks.yml | 15 +++++ playbooks/gce/openshift-cluster/config.yml | 4 ++ playbooks/gce/openshift-cluster/join_node.yml | 64 ++++++++++++++++++++++ playbooks/gce/openshift-cluster/launch.yml | 2 +- playbooks/gce/openshift-cluster/list.yml | 4 +- .../openshift-cluster/tasks/launch_instances.yml | 14 +++-- playbooks/gce/openshift-cluster/terminate.yml | 55 ++++++++++++------- playbooks/gce/openshift-cluster/vars.yml | 8 ++- .../openshift-cluster/files/heat_stack.yaml | 20 ++++++- playbooks/openstack/openshift-cluster/launch.yml | 35 ++++++++++-- roles/openshift_facts/tasks/main.yml | 2 +- roles/openshift_manage_node/tasks/main.yml | 2 +- roles/openshift_master/tasks/main.yml | 11 +++- roles/openshift_node/tasks/main.yml | 8 ++- 18 files changed, 234 insertions(+), 55 deletions(-) create mode 100644 playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml create mode 100644 playbooks/gce/openshift-cluster/join_node.yml (limited to 'playbooks') diff --git a/README_GCE.md b/README_GCE.md index f6c5138c1..50f8ade70 100644 --- a/README_GCE.md +++ b/README_GCE.md @@ -39,6 +39,13 @@ Create a gce.ini file for GCE * gce_service_account_pem_file_path - Full path from previous steps * gce_project_id - Found in "Projects", it lists all the gce projects you are associated with. The page lists their "Project Name" and "Project ID". You want the "Project ID" +Mandatory customization variables (check the values according to your tenant): +* zone = europe-west1-d +* network = default +* gce_machine_type = n1-standard-2 +* gce_machine_image = preinstalled-slave-50g-v5 + + 1. vi ~/.gce/gce.ini 1. make the contents look like this: ``` @@ -46,11 +53,15 @@ Create a gce.ini file for GCE gce_service_account_email_address = long...@developer.gserviceaccount.com gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem gce_project_id = project_id +zone = europe-west1-d +network = default +gce_machine_type = n1-standard-2 +gce_machine_image = preinstalled-slave-50g-v5 + ``` -1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py) +1. 
Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it ``` - cd openshift-ansible/inventory/gce - ln -s ~/.gce/gce.ini gce.ini +export GCE_INI_PATH=~/.gce/gce.ini ``` diff --git a/bin/cluster b/bin/cluster index 582327415..e72ce547c 100755 --- a/bin/cluster +++ b/bin/cluster @@ -142,10 +142,14 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - config.readfp(open('inventory/gce/hosts/gce.ini')) + gce_ini_default_path = os.path.join( + 'inventory/gce/hosts/gce.ini') + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + if os.path.exists(gce_ini_path): + config.readfp(open(gce_ini_path)) - for key in config.options('gce'): - os.environ[key] = config.get('gce', key) + for key in config.options('gce'): + os.environ[key] = config.get('gce', key) inventory = '-i inventory/gce/hosts' elif 'aws' == provider: @@ -193,7 +197,7 @@ class Cluster(object): if args.option: for opt in args.option: k, v = opt.split('=', 1) - env['cli_' + k] = v + env[k] = v ansible_env = '-e \'{}\''.format( ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()]) diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index 3403f735e..bf018f1fe 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -120,6 +120,8 @@ class GceInventory(object): os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + print "GCE INI PATH :: "+gce_ini_path + # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able @@ -173,6 +175,10 @@ class GceInventory(object): args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) + sys.stderr.write("GCE_EMAIL : "+args[0]+"\n") + sys.stderr.write("GCE_PEM_FILE_PATH : "+args[1]+"\n") + sys.stderr.write("GCE_PROJECT : "+kwargs['project']+"\n") + # Retrieve and return the GCE driver. 
gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( @@ -211,7 +217,8 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], + # Hosts don't always have a public IP name + #'gce_public_ip': inst.public_ips[0], 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -219,8 +226,8 @@ class GceInventory(object): 'gce_tags': inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, - # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] + # Hosts don't always have a public IP name + #'ansible_ssh_host': inst.public_ips[0] } def get_instance(self, instance_name): @@ -284,4 +291,5 @@ class GceInventory(object): # Run the script +print "Hello world" GceInventory() diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py index d5bd8d1ee..3197a57bc 100755 --- a/inventory/openstack/hosts/nova.py +++ b/inventory/openstack/hosts/nova.py @@ -34,7 +34,7 @@ except ImportError: # executed with no parameters, return the list of # all groups and hosts -NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini", +NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"), os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")), "/etc/ansible/nova.ini"] diff --git a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml new file mode 100644 index 000000000..0fd53eb7d --- /dev/null +++ b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml @@ -0,0 +1,15 @@ +--- +- set_fact: k8s_type=infra +- set_fact: sub_host_type="{{ type }}" +- set_fact: number_infra="{{ count }}" + +- name: Generate infra instance names(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" + register: infra_names_output + with_sequence: count={{ number_infra }} + +- set_fact: + infra_names: "{{ infra_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index fd5dfcc72..7bd3f1a56 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -10,6 +10,8 @@ - set_fact: g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" + use_sdn: "{{ do_we_use_openshift_sdn }}" + sdn_plugin: "{{ sdn_network_plugin }}" - include: ../../common/openshift-cluster/config.yml vars: @@ -22,3 +24,5 @@ openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" + openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}" + os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}" diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml new file mode 100644 index 000000000..613bbb34f --- /dev/null +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -0,0 +1,64 @@ +--- +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ node_ip }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ 
deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + +- include: ../../common/openshift-cluster/update_repos_and_packages.yml + +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ node_ip }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_nodes_to_config + + - name: Add to preemptible group if needed + add_host: + name: "{{ node_ip }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_preemptible_nodes + when: preemptible is defined and preemptible == "true" + + - name: Add to not preemptible group if needed + add_host: + name: "{{ node_ip }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_non_preemptible_nodes + when: preemptible is defined and preemptible == "false" + + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_first_master + when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + +#- include: config.yml +- include: ../../common/openshift-node/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ ansible_default_ipv4.address }}" + openshift_use_openshift_sdn: true + os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" + osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 7a3b80da0..762fa9e8d 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -28,7 +28,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml vars: type: "infra" count: "{{ num_infra }}" diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 5ba0f5a48..f5f89baf0 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -14,11 +14,11 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) - name: List instance(s) hosts: oo_list_hosts gather_facts: no tasks: - debug: - msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" + msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" diff --git 
a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 6307ecc27..f569b2a37 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -10,18 +10,22 @@ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" + zone: "{{ lookup('env', 'zone') }}" + network: "{{ lookup('env', 'network') }}" +# unsupported in 1.9.+ + #service_account_permissions: "datastore,logging-write" tags: - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} - env-{{ cluster }} - host-type-{{ type }} - - sub-host-type-{{ sub_host_type }} + - sub-host-type-{{ g_sub_host_type }} - env-host-type-{{ cluster }}-openshift-{{ type }} register: gce - name: Add new instances to groups and set variables needed add_host: hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_host: "{{ item.name }}" ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" @@ -30,13 +34,13 @@ with_items: gce.instance_data - name: Wait for ssh - wait_for: port=22 host={{ item.public_ip }} + wait_for: port=22 host={{ item.name }} with_items: gce.instance_data - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" register: result until: result.rc == 0 - retries: 20 - delay: 10 + retries: 30 + delay: 5 with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 098b0df73..f705745d9 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -1,25 +1,18 @@ --- - name: Terminate instance(s) hosts: localhost + connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node + - set_fact: scratch_group=tag_env-{{ cluster_id }} - add_host: name: "{{ item }}" - groups: oo_hosts_to_terminate, oo_nodes_to_terminate + groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate, oo_masters_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -32,14 +25,34 @@ lookup('oo_option', 'rhel_skip_subscription') | 
default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] -- include: ../openshift-node/terminate.yml - vars: - gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +- name: Terminate instances(s) + hosts: localhost + connection: local + gather_facts: no + vars_files: + - vars.yml + tasks: + + - name: Terminate instances that were previously launched + local_action: + module: gce + state: 'absent' + name: "{{ item }}" + service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + project_id: "{{ lookup('env', 'gce_project_id') }}" + zone: "{{ lookup('env', 'zone') }}" + with_items: groups['oo_hosts_to_terminate'] | default([]) + when: item is defined -- include: ../openshift-master/terminate.yml - vars: - gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +#- include: ../openshift-node/terminate.yml +# vars: +# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" +# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" +# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +# +#- include: ../openshift-master/terminate.yml +# vars: +# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" +# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" +# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml index ae33083b9..6de007807 100644 --- a/playbooks/gce/openshift-cluster/vars.yml +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -1,8 +1,11 @@ --- +do_we_use_openshift_sdn: true +sdn_network_plugin: redhat/openshift-ovs-subnet +# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation deployment_vars: origin: - image: centos-7 - ssh_user: + image: preinstalled-slave-50g-v5 + ssh_user: root sudo: yes online: image: libra-rhel7 @@ -12,4 +15,3 @@ deployment_vars: image: rhel-7 ssh_user: sudo: yes - diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 40e4ab22c..e3e2b6872 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -88,6 +88,12 @@ parameters: label: Infra flavor description: Flavor of the infra node servers + key_pair: + type: string + label: Key name + description: Name of the key + + outputs: master_names: @@ -250,6 +256,14 @@ resources: port_range_max: 10250 remote_mode: remote_group_id remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 30001 + port_range_max: 30001 + - direction: ingress + protocol: tcp + port_range_min: 30850 + port_range_max: 30850 infra-secgrp: type: OS::Neutron::SecurityGroup @@ -291,7 +305,7 @@ resources: type: master image: { get_param: master_image } flavor: { get_param: master_flavor } - key_name: { get_resource: keypair } + key_name: { get_param: key_pair } net: { get_resource: net } 
subnet: { get_resource: subnet } secgrp: @@ -323,7 +337,7 @@ resources: subtype: compute image: { get_param: node_image } flavor: { get_param: node_flavor } - key_name: { get_resource: keypair } + key_name: { get_param: key_pair } net: { get_resource: net } subnet: { get_resource: subnet } secgrp: @@ -355,7 +369,7 @@ resources: subtype: infra image: { get_param: infra_image } flavor: { get_param: infra_flavor } - key_name: { get_resource: keypair } + key_name: { get_param: key_pair } net: { get_resource: net } subnet: { get_resource: subnet } secgrp: diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 651aef40b..5f1780476 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -19,15 +19,32 @@ changed_when: false failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - set_fact: - heat_stack_action: 'stack-create' + - name: Create OpenStack Stack + command: 'heat stack-create -f {{ openstack_infra_heat_stack }} + -P key_pair={{ openstack_ssh_keypair }} + -P cluster_id={{ cluster_id }} + -P dns_nameservers={{ openstack_network_dns | join(",") }} + -P cidr={{ openstack_network_cidr }} + -P ssh_incoming={{ openstack_ssh_access_from }} + -P num_masters={{ num_masters }} + -P num_nodes={{ num_nodes }} + -P num_infra={{ num_infra }} + -P master_image={{ deployment_vars[deployment_type].image }} + -P node_image={{ deployment_vars[deployment_type].image }} + -P infra_image={{ deployment_vars[deployment_type].image }} + -P master_flavor={{ openstack_flavor["master"] }} + -P node_flavor={{ openstack_flavor["node"] }} + -P infra_flavor={{ openstack_flavor["infra"] }} + -P ssh_public_key="{{ openstack_ssh_public_key }}" + openshift-ansible-{{ cluster_id }}-stack' when: stack_show_result.rc == 1 - set_fact: heat_stack_action: 'stack-update' when: stack_show_result.rc == 0 - - name: Create or Update OpenStack Stack - command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} + - name: Update OpenStack Stack + command: 'heat stack-update -f {{ openstack_infra_heat_stack }} + -P key_pair={{ openstack_ssh_keypair }} -P cluster_id={{ cluster_id }} -P cidr={{ openstack_network_cidr }} -P dns_nameservers={{ openstack_network_dns | join(",") }} @@ -50,7 +67,7 @@ shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' register: stack_show_status_result until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 30 + retries: 300 delay: 1 failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] @@ -119,4 +136,12 @@ - include: update.yml +# Fix icmp reject iptables rules +# It should be solved in openshift-sdn but unfortunately it's not the case +# Mysterious +- name: Configuring Nodes for RBox + hosts: oo_nodes_to_config + roles: + - rbox-node + - include: list.yml diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index fd3d20800..6301d4fc0 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 +- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1 assert: that: - ansible_version | version_compare('1.8.0', 'ge') diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml 
index 7c4f45ce6..94d7879b2 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -3,7 +3,7 @@ {{ openshift.common.client_binary }} get node {{ item }} register: omd_get_node until: omd_get_node.rc == 0 - retries: 10 + retries: 20 delay: 5 with_items: openshift_nodes diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index fa12005ab..96cc4d9af 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -8,6 +8,15 @@ - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods when: openshift_master_oauth_grant_method is defined +- name: Displaying openshift_master_ha + debug: var=openshift_master_ha + +- name: openshift_master_cluster_password + debug: var=openshift_master_cluster_password + +- name: openshift.master.cluster_defer_ha + debug: var=openshift.master.cluster_defer_ha + - fail: msg: "openshift_master_cluster_password must be set for multi-master installations" when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined @@ -23,7 +32,7 @@ api_port: "{{ openshift_master_api_port | default(None) }}" api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" - public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + public_api_url: "{{ openshift_master_public_api_url | default('https://' ~ openshift.common.public_ip ~ ':8443') }}" console_path: "{{ openshift_master_console_path | default(None) }}" console_port: "{{ openshift_master_console_port | default(None) }}" console_url: "{{ openshift_master_console_url | default(None) }}" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e8cc499c0..7e5ac2b5b 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -22,7 +22,7 @@ deployment_type: "{{ openshift_deployment_type }}" - role: node local_facts: - labels: "{{ openshift_node_labels | default(none) }}" + labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default() ) }}" annotations: "{{ openshift_node_annotations | default(none) }}" registry_url: "{{ oreg_url | default(none) }}" debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" @@ -73,6 +73,12 @@ dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \ +--insecure-registry=dockerhub.rnd.amadeus.net:5000 \ +--insecure-registry=dockerhub.rnd.amadeus.net:5001 \ +--insecure-registry=dockerhub.rnd.amadeus.net:5002 \ +--add-registry=dockerhub.rnd.amadeus.net:5000 \ +--add-registry=dockerhub.rnd.amadeus.net:5001 \ +--add-registry=dockerhub.rnd.amadeus.net:5002 \ {% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'" when: docker_check.stat.isreg notify: -- cgit v1.2.3 From 9229927a98389f0dae2abb51e1df971f9457afb3 Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Thu, 1 Oct 2015 15:33:32 +0200 Subject: oo_option fixed, some clean up --- bin/cluster | 2 +- inventory/gce/hosts/gce.py | 7 +------ playbooks/gce/openshift-cluster/join_node.yml | 17 +---------------- .../openstack/openshift-cluster/files/heat_stack.yaml | 8 -------- 4 files changed, 3 insertions(+), 31 deletions(-) (limited to 'playbooks') diff --git a/bin/cluster b/bin/cluster index e72ce547c..0e305141f 100755 --- 
a/bin/cluster +++ b/bin/cluster @@ -197,7 +197,7 @@ class Cluster(object): if args.option: for opt in args.option: k, v = opt.split('=', 1) - env[k] = v + env['cli_' + k] = v ansible_env = '-e \'{}\''.format( ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()]) diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index bf018f1fe..6ed12e011 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -120,7 +120,6 @@ class GceInventory(object): os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - print "GCE INI PATH :: "+gce_ini_path # Create a ConfigParser. # This provides empty defaults to each key, so that environment @@ -175,10 +174,7 @@ class GceInventory(object): args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) - sys.stderr.write("GCE_EMAIL : "+args[0]+"\n") - sys.stderr.write("GCE_PEM_FILE_PATH : "+args[1]+"\n") - sys.stderr.write("GCE_PROJECT : "+kwargs['project']+"\n") - + # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( @@ -291,5 +287,4 @@ class GceInventory(object): # Run the script -print "Hello world" GceInventory() diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml index 613bbb34f..0dfa3e9d7 100644 --- a/playbooks/gce/openshift-cluster/join_node.yml +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -27,22 +27,6 @@ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: oo_nodes_to_config - - name: Add to preemptible group if needed - add_host: - name: "{{ node_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_preemptible_nodes - when: preemptible is defined and preemptible == "true" - - - name: Add to not preemptible group if needed - add_host: - name: "{{ node_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_non_preemptible_nodes - when: preemptible is defined and preemptible == "false" - - name: Evaluate oo_first_master add_host: name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" @@ -59,6 +43,7 @@ openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ansible_default_ipv4.address }}" openshift_use_openshift_sdn: true + openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} " os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index e3e2b6872..cd2636c9c 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -256,14 +256,6 @@ resources: port_range_max: 10250 remote_mode: remote_group_id remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 30001 - port_range_max: 30001 - - direction: ingress - protocol: tcp - port_range_min: 30850 - port_range_max: 30850 infra-secgrp: type: OS::Neutron::SecurityGroup -- cgit v1.2.3 From 
b8dcab08624bfdc4e89a144b82caa68883d1f861 Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Thu, 1 Oct 2015 15:45:45 +0200 Subject: Removed some application specific code like insecure registries, heat template --- playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 12 +++--------- roles/openshift_master/tasks/main.yml | 9 --------- roles/openshift_node/tasks/main.yml | 6 ------ 3 files changed, 3 insertions(+), 24 deletions(-) (limited to 'playbooks') diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index cd2636c9c..40e4ab22c 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -88,12 +88,6 @@ parameters: label: Infra flavor description: Flavor of the infra node servers - key_pair: - type: string - label: Key name - description: Name of the key - - outputs: master_names: @@ -297,7 +291,7 @@ resources: type: master image: { get_param: master_image } flavor: { get_param: master_flavor } - key_name: { get_param: key_pair } + key_name: { get_resource: keypair } net: { get_resource: net } subnet: { get_resource: subnet } secgrp: @@ -329,7 +323,7 @@ resources: subtype: compute image: { get_param: node_image } flavor: { get_param: node_flavor } - key_name: { get_param: key_pair } + key_name: { get_resource: keypair } net: { get_resource: net } subnet: { get_resource: subnet } secgrp: @@ -361,7 +355,7 @@ resources: subtype: infra image: { get_param: infra_image } flavor: { get_param: infra_flavor } - key_name: { get_param: key_pair } + key_name: { get_resource: keypair } net: { get_resource: net } subnet: { get_resource: subnet } secgrp: diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 96cc4d9af..88940ec8c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -8,15 +8,6 @@ - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods when: openshift_master_oauth_grant_method is defined -- name: Displaying openshift_master_ha - debug: var=openshift_master_ha - -- name: openshift_master_cluster_password - debug: var=openshift_master_cluster_password - -- name: openshift.master.cluster_defer_ha - debug: var=openshift.master.cluster_defer_ha - - fail: msg: "openshift_master_cluster_password must be set for multi-master installations" when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 7e5ac2b5b..96cd96315 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -73,12 +73,6 @@ dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \ ---insecure-registry=dockerhub.rnd.amadeus.net:5000 \ ---insecure-registry=dockerhub.rnd.amadeus.net:5001 \ ---insecure-registry=dockerhub.rnd.amadeus.net:5002 \ ---add-registry=dockerhub.rnd.amadeus.net:5000 \ ---add-registry=dockerhub.rnd.amadeus.net:5001 \ ---add-registry=dockerhub.rnd.amadeus.net:5002 \ {% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'" when: docker_check.stat.isreg notify: -- cgit v1.2.3 From 7748ce4463ca6d2e31b55c81ffc1418f8a999b55 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 1 Oct 2015 10:56:36 -0700 Subject: added 'missingok' 
to logrotate and disabled fluentd in online --- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 1 + roles/openshift_facts/library/openshift_facts.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index b77bcdc1a..9c699120b 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -172,6 +172,7 @@ - rotate 7 - compress - sharedscripts + - missingok scripts: postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index f708f9bac..3ab3663de 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -296,9 +296,8 @@ def set_fluentd_facts_if_unset(facts): """ if 'common' in facts: - deployment_type = facts['common']['deployment_type'] if 'use_fluentd' not in facts['common']: - use_fluentd = True if deployment_type == 'online' else False + use_fluentd = False facts['common']['use_fluentd'] = use_fluentd return facts -- cgit v1.2.3 From 59fb7879501d702bb78a1d79326408b115a63c90 Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Fri, 2 Oct 2015 12:04:57 +0000 Subject: openshift master public api url defaults to None, it will get its default value in openshift_facts commented infra-node code until it's solved --- playbooks/gce/openshift-cluster/launch.yml | 54 +++++++++++----------- .../openshift-cluster/tasks/launch_instances.yml | 7 +-- roles/openshift_master/tasks/main.yml | 2 +- 3 files changed, 32 insertions(+), 31 deletions(-) (limited to 'playbooks') diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 762fa9e8d..94e57fe4e 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -28,33 +28,33 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ infra_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - set_fact: - a_infra: "{{ infra_names[0] }}" - - add_host: name={{ a_infra }} groups=service_master - +# - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml +# vars: +# type: "infra" +# count: "{{ num_infra }}" +# - include: tasks/launch_instances.yml +# vars: +# instances: "{{ infra_names }}" +# cluster: "{{ cluster_id }}" +# type: "{{ k8s_type }}" +# g_sub_host_type: "{{ sub_host_type }}" +# +# - set_fact: +# a_infra: "{{ infra_names[0] }}" +# - add_host: name={{ a_infra }} groups=service_master +# - include: update.yml - -- name: Deploy OpenShift Services - hosts: service_master - connection: ssh - gather_facts: yes - roles: - - openshift_registry - - openshift_router - -- include: ../../common/openshift-cluster/create_services.yml - vars: - g_svc_master: "{{ service_master }}" +# +#- name: Deploy OpenShift Services +# hosts: service_master +# connection: ssh +# gather_facts: yes +# roles: +# - openshift_registry +# - openshift_router +# +#- include: ../../common/openshift-cluster/create_services.yml +# vars: +# g_svc_master: "{{ service_master 
}}" - include: list.yml diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index f569b2a37..b07982305 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -20,6 +20,7 @@ - host-type-{{ type }} - sub-host-type-{{ g_sub_host_type }} - env-host-type-{{ cluster }}-openshift-{{ type }} + when: instances |length > 0 register: gce - name: Add new instances to groups and set variables needed @@ -31,11 +32,11 @@ groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data + with_items: gce.instance_data | default([]) - name: Wait for ssh wait_for: port=22 host={{ item.name }} - with_items: gce.instance_data + with_items: gce.instance_data | default([]) - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" @@ -43,4 +44,4 @@ until: result.rc == 0 retries: 30 delay: 5 - with_items: gce.instance_data + with_items: gce.instance_data | default([]) diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 88940ec8c..fa12005ab 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -23,7 +23,7 @@ api_port: "{{ openshift_master_api_port | default(None) }}" api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" - public_api_url: "{{ openshift_master_public_api_url | default('https://' ~ openshift.common.public_ip ~ ':8443') }}" + public_api_url: "{{ openshift_master_public_api_url | default(None) }}" console_path: "{{ openshift_master_console_path | default(None) }}" console_port: "{{ openshift_master_console_port | default(None) }}" console_url: "{{ openshift_master_console_url | default(None) }}" -- cgit v1.2.3 From 3073d1f729f9dcd202088f6b318b465567c6344b Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Mon, 5 Oct 2015 13:48:41 -0400 Subject: Revert "GCE support" --- README_GCE.md | 17 ++----- bin/cluster | 12 ++--- inventory/gce/hosts/gce.py | 9 ++-- inventory/openstack/hosts/nova.py | 2 +- .../set_infra_launch_facts_tasks.yml | 15 ------ playbooks/gce/openshift-cluster/config.yml | 4 -- playbooks/gce/openshift-cluster/join_node.yml | 49 ------------------- playbooks/gce/openshift-cluster/launch.yml | 54 ++++++++++----------- playbooks/gce/openshift-cluster/list.yml | 4 +- .../openshift-cluster/tasks/launch_instances.yml | 21 ++++----- playbooks/gce/openshift-cluster/terminate.yml | 55 +++++++++------------- playbooks/gce/openshift-cluster/vars.yml | 8 ++-- playbooks/openstack/openshift-cluster/launch.yml | 35 ++------------ roles/openshift_facts/tasks/main.yml | 2 +- roles/openshift_manage_node/tasks/main.yml | 2 +- roles/openshift_node/tasks/main.yml | 2 +- 16 files changed, 80 insertions(+), 211 deletions(-) delete mode 100644 playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml delete mode 100644 playbooks/gce/openshift-cluster/join_node.yml (limited to 'playbooks') diff --git a/README_GCE.md b/README_GCE.md index 50f8ade70..f6c5138c1 100644 --- a/README_GCE.md +++ b/README_GCE.md @@ -39,13 +39,6 @@ Create a gce.ini file for GCE 
* gce_service_account_pem_file_path - Full path from previous steps * gce_project_id - Found in "Projects", it list all the gce projects you are associated with. The page lists their "Project Name" and "Project ID". You want the "Project ID" -Mandatory customization variables (check the values according to your tenant): -* zone = europe-west1-d -* network = default -* gce_machine_type = n1-standard-2 -* gce_machine_image = preinstalled-slave-50g-v5 - - 1. vi ~/.gce/gce.ini 1. make the contents look like this: ``` @@ -53,15 +46,11 @@ Mandatory customization variables (check the values according to your tenant): gce_service_account_email_address = long...@developer.gserviceaccount.com gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem gce_project_id = project_id -zone = europe-west1-d -network = default -gce_machine_type = n1-standard-2 -gce_machine_image = preinstalled-slave-50g-v5 - ``` -1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it +1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py) ``` -export GCE_INI_PATH=~/.gce/gce.ini + cd openshift-ansible/inventory/gce + ln -s ~/.gce/gce.ini gce.ini ``` diff --git a/bin/cluster b/bin/cluster index 0e305141f..582327415 100755 --- a/bin/cluster +++ b/bin/cluster @@ -142,14 +142,10 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - gce_ini_default_path = os.path.join( - 'inventory/gce/hosts/gce.ini') - gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - if os.path.exists(gce_ini_path): - config.readfp(open(gce_ini_path)) - - for key in config.options('gce'): - os.environ[key] = config.get('gce', key) + config.readfp(open('inventory/gce/hosts/gce.ini')) + + for key in config.options('gce'): + os.environ[key] = config.get('gce', key) inventory = '-i inventory/gce/hosts' elif 'aws' == provider: diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index 6ed12e011..3403f735e 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -120,7 +120,6 @@ class GceInventory(object): os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able @@ -174,7 +173,6 @@ class GceInventory(object): args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) - # Retrieve and return the GCE driver. 
gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( @@ -213,8 +211,7 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - # Hosts don't always have a public IP name - #'gce_public_ip': inst.public_ips[0], + 'gce_public_ip': inst.public_ips[0], 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -222,8 +219,8 @@ class GceInventory(object): 'gce_tags': inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, - # Hosts don't always have a public IP name - #'ansible_ssh_host': inst.public_ips[0] + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': inst.public_ips[0] } def get_instance(self, instance_name): diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py index 3197a57bc..d5bd8d1ee 100755 --- a/inventory/openstack/hosts/nova.py +++ b/inventory/openstack/hosts/nova.py @@ -34,7 +34,7 @@ except ImportError: # executed with no parameters, return the list of # all groups and hosts -NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"), +NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini", os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")), "/etc/ansible/nova.ini"] diff --git a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml deleted file mode 100644 index 0fd53eb7d..000000000 --- a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- set_fact: k8s_type=infra -- set_fact: sub_host_type="{{ type }}" -- set_fact: number_infra="{{ count }}" - -- name: Generate infra instance names(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" - register: infra_names_output - with_sequence: count={{ number_infra }} - -- set_fact: - infra_names: "{{ infra_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 7bd3f1a56..fd5dfcc72 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -10,8 +10,6 @@ - set_fact: g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - use_sdn: "{{ do_we_use_openshift_sdn }}" - sdn_plugin: "{{ sdn_network_plugin }}" - include: ../../common/openshift-cluster/config.yml vars: @@ -24,5 +22,3 @@ openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" - openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}" - os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}" diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml deleted file mode 100644 index 0dfa3e9d7..000000000 --- a/playbooks/gce/openshift-cluster/join_node.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- name: Populate oo_hosts_to_update group - hosts: localhost - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ node_ip }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - -- include: 
../../common/openshift-cluster/update_repos_and_packages.yml - -- name: Populate oo_masters_to_config host group - hosts: localhost - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ node_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_nodes_to_config - - - name: Evaluate oo_first_master - add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_first_master - when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" - -#- include: config.yml -- include: ../../common/openshift-node/config.yml - vars: - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ ansible_default_ipv4.address }}" - openshift_use_openshift_sdn: true - openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} " - os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" - osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" - osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 94e57fe4e..7a3b80da0 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -28,33 +28,33 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" -# - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml -# vars: -# type: "infra" -# count: "{{ num_infra }}" -# - include: tasks/launch_instances.yml -# vars: -# instances: "{{ infra_names }}" -# cluster: "{{ cluster_id }}" -# type: "{{ k8s_type }}" -# g_sub_host_type: "{{ sub_host_type }}" -# -# - set_fact: -# a_infra: "{{ infra_names[0] }}" -# - add_host: name={{ a_infra }} groups=service_master -# + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "infra" + count: "{{ num_infra }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ infra_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + + - set_fact: + a_infra: "{{ infra_names[0] }}" + - add_host: name={{ a_infra }} groups=service_master + - include: update.yml -# -#- name: Deploy OpenShift Services -# hosts: service_master -# connection: ssh -# gather_facts: yes -# roles: -# - openshift_registry -# - openshift_router -# -#- include: ../../common/openshift-cluster/create_services.yml -# vars: -# g_svc_master: "{{ service_master }}" + +- name: Deploy OpenShift Services + hosts: service_master + connection: ssh + gather_facts: yes + roles: + - openshift_registry + - openshift_router + +- include: ../../common/openshift-cluster/create_services.yml + vars: + g_svc_master: "{{ service_master }}" - include: list.yml diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index f5f89baf0..5ba0f5a48 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -14,11 +14,11 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ 
deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - name: List instance(s) hosts: oo_list_hosts gather_facts: no tasks: - debug: - msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" + msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index b07982305..6307ecc27 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -10,38 +10,33 @@ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" - zone: "{{ lookup('env', 'zone') }}" - network: "{{ lookup('env', 'network') }}" -# unsupported in 1.9.+ - #service_account_permissions: "datastore,logging-write" tags: - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} - env-{{ cluster }} - host-type-{{ type }} - - sub-host-type-{{ g_sub_host_type }} + - sub-host-type-{{ sub_host_type }} - env-host-type-{{ cluster }}-openshift-{{ type }} - when: instances |length > 0 register: gce - name: Add new instances to groups and set variables needed add_host: hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data | default([]) + with_items: gce.instance_data - name: Wait for ssh - wait_for: port=22 host={{ item.name }} - with_items: gce.instance_data | default([]) + wait_for: port=22 host={{ item.public_ip }} + with_items: gce.instance_data - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" register: result until: result.rc == 0 - retries: 30 - delay: 5 - with_items: gce.instance_data | default([]) + retries: 20 + delay: 10 + with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index f705745d9..098b0df73 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -1,18 +1,25 @@ --- - name: Terminate instance(s) hosts: localhost - connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-{{ cluster_id }} + - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node - add_host: name: "{{ item }}" - groups: oo_hosts_to_terminate + groups: oo_hosts_to_terminate, oo_nodes_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: 
groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + + - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master + - add_host: + name: "{{ item }}" + groups: oo_hosts_to_terminate, oo_masters_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -25,34 +32,14 @@ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] -- name: Terminate instances(s) - hosts: localhost - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - name: "{{ item }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: "{{ lookup('env', 'gce_project_id') }}" - zone: "{{ lookup('env', 'zone') }}" - with_items: groups['oo_hosts_to_terminate'] | default([]) - when: item is defined +- include: ../openshift-node/terminate.yml + vars: + gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + gce_project_id: "{{ lookup('env', 'gce_project_id') }}" -#- include: ../openshift-node/terminate.yml -# vars: -# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" -# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" -# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" -# -#- include: ../openshift-master/terminate.yml -# vars: -# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" -# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" -# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +- include: ../openshift-master/terminate.yml + vars: + gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml index 6de007807..ae33083b9 100644 --- a/playbooks/gce/openshift-cluster/vars.yml +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -1,11 +1,8 @@ --- -do_we_use_openshift_sdn: true -sdn_network_plugin: redhat/openshift-ovs-subnet -# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation deployment_vars: origin: - image: preinstalled-slave-50g-v5 - ssh_user: root + image: centos-7 + ssh_user: sudo: yes online: image: libra-rhel7 @@ -15,3 +12,4 @@ deployment_vars: image: rhel-7 ssh_user: sudo: yes + diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 5f1780476..651aef40b 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ 
-19,32 +19,15 @@ changed_when: false failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - name: Create OpenStack Stack - command: 'heat stack-create -f {{ openstack_infra_heat_stack }} - -P key_pair={{ openstack_ssh_keypair }} - -P cluster_id={{ cluster_id }} - -P dns_nameservers={{ openstack_network_dns | join(",") }} - -P cidr={{ openstack_network_cidr }} - -P ssh_incoming={{ openstack_ssh_access_from }} - -P num_masters={{ num_masters }} - -P num_nodes={{ num_nodes }} - -P num_infra={{ num_infra }} - -P master_image={{ deployment_vars[deployment_type].image }} - -P node_image={{ deployment_vars[deployment_type].image }} - -P infra_image={{ deployment_vars[deployment_type].image }} - -P master_flavor={{ openstack_flavor["master"] }} - -P node_flavor={{ openstack_flavor["node"] }} - -P infra_flavor={{ openstack_flavor["infra"] }} - -P ssh_public_key="{{ openstack_ssh_public_key }}" - openshift-ansible-{{ cluster_id }}-stack' + - set_fact: + heat_stack_action: 'stack-create' when: stack_show_result.rc == 1 - set_fact: heat_stack_action: 'stack-update' when: stack_show_result.rc == 0 - - name: Update OpenStack Stack - command: 'heat stack-update -f {{ openstack_infra_heat_stack }} - -P key_pair={{ openstack_ssh_keypair }} + - name: Create or Update OpenStack Stack + command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} -P cluster_id={{ cluster_id }} -P cidr={{ openstack_network_cidr }} -P dns_nameservers={{ openstack_network_dns | join(",") }} @@ -67,7 +50,7 @@ shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' register: stack_show_status_result until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 300 + retries: 30 delay: 1 failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] @@ -136,12 +119,4 @@ - include: update.yml -# Fix icmp reject iptables rules -# It should be solved in openshift-sdn but unfortunately it's not the case -# Mysterious -- name: Configuring Nodes for RBox - hosts: oo_nodes_to_config - roles: - - rbox-node - - include: list.yml diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 6301d4fc0..fd3d20800 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1 +- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 assert: that: - ansible_version | version_compare('1.8.0', 'ge') diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 94d7879b2..7c4f45ce6 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -3,7 +3,7 @@ {{ openshift.common.client_binary }} get node {{ item }} register: omd_get_node until: omd_get_node.rc == 0 - retries: 20 + retries: 10 delay: 5 with_items: openshift_nodes diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 96cd96315..e8cc499c0 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -22,7 +22,7 @@ deployment_type: "{{ openshift_deployment_type }}" - role: node local_facts: - labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default() ) }}" + labels: "{{ openshift_node_labels | default(none) }}" annotations: "{{ 
openshift_node_annotations | default(none) }}" registry_url: "{{ oreg_url | default(none) }}" debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" -- cgit v1.2.3 From 6e80868ad12cde826fbd919a365335935fb75c84 Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Tue, 6 Oct 2015 10:13:27 +0200 Subject: playbooks/openstack/openshift-cluster/launch.yml back to its correct version --- playbooks/openstack/openshift-cluster/launch.yml | 35 ++++-------------------- 1 file changed, 5 insertions(+), 30 deletions(-) (limited to 'playbooks') diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 5f1780476..651aef40b 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -19,32 +19,15 @@ changed_when: false failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - name: Create OpenStack Stack - command: 'heat stack-create -f {{ openstack_infra_heat_stack }} - -P key_pair={{ openstack_ssh_keypair }} - -P cluster_id={{ cluster_id }} - -P dns_nameservers={{ openstack_network_dns | join(",") }} - -P cidr={{ openstack_network_cidr }} - -P ssh_incoming={{ openstack_ssh_access_from }} - -P num_masters={{ num_masters }} - -P num_nodes={{ num_nodes }} - -P num_infra={{ num_infra }} - -P master_image={{ deployment_vars[deployment_type].image }} - -P node_image={{ deployment_vars[deployment_type].image }} - -P infra_image={{ deployment_vars[deployment_type].image }} - -P master_flavor={{ openstack_flavor["master"] }} - -P node_flavor={{ openstack_flavor["node"] }} - -P infra_flavor={{ openstack_flavor["infra"] }} - -P ssh_public_key="{{ openstack_ssh_public_key }}" - openshift-ansible-{{ cluster_id }}-stack' + - set_fact: + heat_stack_action: 'stack-create' when: stack_show_result.rc == 1 - set_fact: heat_stack_action: 'stack-update' when: stack_show_result.rc == 0 - - name: Update OpenStack Stack - command: 'heat stack-update -f {{ openstack_infra_heat_stack }} - -P key_pair={{ openstack_ssh_keypair }} + - name: Create or Update OpenStack Stack + command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} -P cluster_id={{ cluster_id }} -P cidr={{ openstack_network_cidr }} -P dns_nameservers={{ openstack_network_dns | join(",") }} @@ -67,7 +50,7 @@ shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' register: stack_show_status_result until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 300 + retries: 30 delay: 1 failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] @@ -136,12 +119,4 @@ - include: update.yml -# Fix icmp reject iptables rules -# It should be solved in openshift-sdn but unfortunately it's not the case -# Mysterious -- name: Configuring Nodes for RBox - hosts: oo_nodes_to_config - roles: - - rbox-node - - include: list.yml -- cgit v1.2.3 From a3ba0278879075e14373a6872acc5f0c3cc3d9a2 Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Tue, 6 Oct 2015 16:59:00 +0200 Subject: Revert "Revert "GCE support"" This reverts commit 3073d1f729f9dcd202088f6b318b465567c6344b. 
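Among the files this revert touches is the OpenStack launch.yml, which it flips back from the consolidated form to separate create and update tasks. For reference, the consolidated pattern being removed first decides which heat sub-command applies and then runs a single parameterized task; a trimmed sketch, with most of the -P parameters omitted for brevity:

    - name: Check whether the stack already exists
      shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
      register: stack_show_result
      changed_when: false
      failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr

    # rc == 1 means "Stack not found", so a create is needed; rc == 0 means it already exists.
    - set_fact:
        heat_stack_action: 'stack-create'
      when: stack_show_result.rc == 1

    - set_fact:
        heat_stack_action: 'stack-update'
      when: stack_show_result.rc == 0

    - name: Create or Update OpenStack Stack
      command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
                -P cluster_id={{ cluster_id }}
                openshift-ansible-{{ cluster_id }}-stack'

Either branch then falls through to the same stack_status polling loop, so the rest of the play does not need to know which action ran.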
--- README_GCE.md | 17 +++++-- bin/cluster | 12 +++-- inventory/gce/hosts/gce.py | 9 ++-- inventory/openstack/hosts/nova.py | 2 +- .../set_infra_launch_facts_tasks.yml | 15 ++++++ playbooks/gce/openshift-cluster/config.yml | 4 ++ playbooks/gce/openshift-cluster/join_node.yml | 49 +++++++++++++++++++ playbooks/gce/openshift-cluster/launch.yml | 54 ++++++++++----------- playbooks/gce/openshift-cluster/list.yml | 4 +- .../openshift-cluster/tasks/launch_instances.yml | 21 +++++---- playbooks/gce/openshift-cluster/terminate.yml | 55 +++++++++++++--------- playbooks/gce/openshift-cluster/vars.yml | 8 ++-- playbooks/openstack/openshift-cluster/launch.yml | 35 ++++++++++++-- roles/openshift_facts/tasks/main.yml | 2 +- roles/openshift_manage_node/tasks/main.yml | 2 +- roles/openshift_node/tasks/main.yml | 2 +- 16 files changed, 211 insertions(+), 80 deletions(-) create mode 100644 playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml create mode 100644 playbooks/gce/openshift-cluster/join_node.yml (limited to 'playbooks') diff --git a/README_GCE.md b/README_GCE.md index f6c5138c1..50f8ade70 100644 --- a/README_GCE.md +++ b/README_GCE.md @@ -39,6 +39,13 @@ Create a gce.ini file for GCE * gce_service_account_pem_file_path - Full path from previous steps * gce_project_id - Found in "Projects", it list all the gce projects you are associated with. The page lists their "Project Name" and "Project ID". You want the "Project ID" +Mandatory customization variables (check the values according to your tenant): +* zone = europe-west1-d +* network = default +* gce_machine_type = n1-standard-2 +* gce_machine_image = preinstalled-slave-50g-v5 + + 1. vi ~/.gce/gce.ini 1. make the contents look like this: ``` @@ -46,11 +53,15 @@ Create a gce.ini file for GCE gce_service_account_email_address = long...@developer.gserviceaccount.com gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem gce_project_id = project_id +zone = europe-west1-d +network = default +gce_machine_type = n1-standard-2 +gce_machine_image = preinstalled-slave-50g-v5 + ``` -1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py) +1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it ``` - cd openshift-ansible/inventory/gce - ln -s ~/.gce/gce.ini gce.ini +export GCE_INI_PATH=~/.gce/gce.ini ``` diff --git a/bin/cluster b/bin/cluster index 582327415..0e305141f 100755 --- a/bin/cluster +++ b/bin/cluster @@ -142,10 +142,14 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - config.readfp(open('inventory/gce/hosts/gce.ini')) - - for key in config.options('gce'): - os.environ[key] = config.get('gce', key) + gce_ini_default_path = os.path.join( + 'inventory/gce/hosts/gce.ini') + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + if os.path.exists(gce_ini_path): + config.readfp(open(gce_ini_path)) + + for key in config.options('gce'): + os.environ[key] = config.get('gce', key) inventory = '-i inventory/gce/hosts' elif 'aws' == provider: diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index 3403f735e..6ed12e011 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -120,6 +120,7 @@ class GceInventory(object): os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + # Create a ConfigParser. 
# This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able @@ -173,6 +174,7 @@ class GceInventory(object): args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) + # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( @@ -211,7 +213,8 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], + # Hosts don't always have a public IP name + #'gce_public_ip': inst.public_ips[0], 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -219,8 +222,8 @@ class GceInventory(object): 'gce_tags': inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, - # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] + # Hosts don't always have a public IP name + #'ansible_ssh_host': inst.public_ips[0] } def get_instance(self, instance_name): diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py index d5bd8d1ee..3197a57bc 100755 --- a/inventory/openstack/hosts/nova.py +++ b/inventory/openstack/hosts/nova.py @@ -34,7 +34,7 @@ except ImportError: # executed with no parameters, return the list of # all groups and hosts -NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini", +NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"), os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")), "/etc/ansible/nova.ini"] diff --git a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml new file mode 100644 index 000000000..0fd53eb7d --- /dev/null +++ b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml @@ -0,0 +1,15 @@ +--- +- set_fact: k8s_type=infra +- set_fact: sub_host_type="{{ type }}" +- set_fact: number_infra="{{ count }}" + +- name: Generate infra instance names(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" + register: infra_names_output + with_sequence: count={{ number_infra }} + +- set_fact: + infra_names: "{{ infra_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index fd5dfcc72..7bd3f1a56 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -10,6 +10,8 @@ - set_fact: g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" + use_sdn: "{{ do_we_use_openshift_sdn }}" + sdn_plugin: "{{ sdn_network_plugin }}" - include: ../../common/openshift-cluster/config.yml vars: @@ -22,3 +24,5 @@ openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" + openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}" + os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}" diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml new file mode 100644 index 000000000..0dfa3e9d7 --- /dev/null +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -0,0 +1,49 @@ +--- +- name: Populate 
oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ node_ip }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + +- include: ../../common/openshift-cluster/update_repos_and_packages.yml + +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ node_ip }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_nodes_to_config + + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_first_master + when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + +#- include: config.yml +- include: ../../common/openshift-node/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ ansible_default_ipv4.address }}" + openshift_use_openshift_sdn: true + openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} " + os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" + osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 7a3b80da0..94e57fe4e 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -28,33 +28,33 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ infra_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - set_fact: - a_infra: "{{ infra_names[0] }}" - - add_host: name={{ a_infra }} groups=service_master - +# - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml +# vars: +# type: "infra" +# count: "{{ num_infra }}" +# - include: tasks/launch_instances.yml +# vars: +# instances: "{{ infra_names }}" +# cluster: "{{ cluster_id }}" +# type: "{{ k8s_type }}" +# g_sub_host_type: "{{ sub_host_type }}" +# +# - set_fact: +# a_infra: "{{ infra_names[0] }}" +# - add_host: name={{ a_infra }} groups=service_master +# - include: update.yml - -- name: Deploy OpenShift Services - hosts: service_master - connection: ssh - gather_facts: yes - roles: - - openshift_registry - - openshift_router - -- include: ../../common/openshift-cluster/create_services.yml - vars: - g_svc_master: "{{ service_master }}" +# +#- name: Deploy OpenShift Services +# hosts: service_master +# connection: ssh +# gather_facts: yes +# roles: +# - openshift_registry +# - openshift_router +# +#- include: ../../common/openshift-cluster/create_services.yml +# vars: +# g_svc_master: "{{ service_master }}" - include: list.yml diff --git a/playbooks/gce/openshift-cluster/list.yml 
b/playbooks/gce/openshift-cluster/list.yml index 5ba0f5a48..f5f89baf0 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -14,11 +14,11 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) - name: List instance(s) hosts: oo_list_hosts gather_facts: no tasks: - debug: - msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" + msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 6307ecc27..b07982305 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -10,33 +10,38 @@ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" + zone: "{{ lookup('env', 'zone') }}" + network: "{{ lookup('env', 'network') }}" +# unsupported in 1.9.+ + #service_account_permissions: "datastore,logging-write" tags: - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} - env-{{ cluster }} - host-type-{{ type }} - - sub-host-type-{{ sub_host_type }} + - sub-host-type-{{ g_sub_host_type }} - env-host-type-{{ cluster }}-openshift-{{ type }} + when: instances |length > 0 register: gce - name: Add new instances to groups and set variables needed add_host: hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_host: "{{ item.name }}" ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data + with_items: gce.instance_data | default([]) - name: Wait for ssh - wait_for: port=22 host={{ item.public_ip }} - with_items: gce.instance_data + wait_for: port=22 host={{ item.name }} + with_items: gce.instance_data | default([]) - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" register: result until: result.rc == 0 - retries: 20 - delay: 10 - with_items: gce.instance_data + retries: 30 + delay: 5 + with_items: gce.instance_data | default([]) diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 098b0df73..f705745d9 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -1,25 +1,18 @@ --- - name: Terminate instance(s) hosts: localhost + connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node + - set_fact: 
scratch_group=tag_env-{{ cluster_id }} - add_host: name: "{{ item }}" - groups: oo_hosts_to_terminate, oo_nodes_to_terminate + groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate, oo_masters_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -32,14 +25,34 @@ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] -- include: ../openshift-node/terminate.yml - vars: - gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +- name: Terminate instances(s) + hosts: localhost + connection: local + gather_facts: no + vars_files: + - vars.yml + tasks: + + - name: Terminate instances that were previously launched + local_action: + module: gce + state: 'absent' + name: "{{ item }}" + service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + project_id: "{{ lookup('env', 'gce_project_id') }}" + zone: "{{ lookup('env', 'zone') }}" + with_items: groups['oo_hosts_to_terminate'] | default([]) + when: item is defined -- include: ../openshift-master/terminate.yml - vars: - gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +#- include: ../openshift-node/terminate.yml +# vars: +# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" +# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" +# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" +# +#- include: ../openshift-master/terminate.yml +# vars: +# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" +# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" +# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml index ae33083b9..6de007807 100644 --- a/playbooks/gce/openshift-cluster/vars.yml +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -1,8 +1,11 @@ --- +do_we_use_openshift_sdn: true +sdn_network_plugin: redhat/openshift-ovs-subnet +# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation deployment_vars: origin: - image: centos-7 - ssh_user: + image: preinstalled-slave-50g-v5 + ssh_user: root sudo: yes online: image: 
libra-rhel7 @@ -12,4 +15,3 @@ deployment_vars: image: rhel-7 ssh_user: sudo: yes - diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 651aef40b..5f1780476 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -19,15 +19,32 @@ changed_when: false failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - set_fact: - heat_stack_action: 'stack-create' + - name: Create OpenStack Stack + command: 'heat stack-create -f {{ openstack_infra_heat_stack }} + -P key_pair={{ openstack_ssh_keypair }} + -P cluster_id={{ cluster_id }} + -P dns_nameservers={{ openstack_network_dns | join(",") }} + -P cidr={{ openstack_network_cidr }} + -P ssh_incoming={{ openstack_ssh_access_from }} + -P num_masters={{ num_masters }} + -P num_nodes={{ num_nodes }} + -P num_infra={{ num_infra }} + -P master_image={{ deployment_vars[deployment_type].image }} + -P node_image={{ deployment_vars[deployment_type].image }} + -P infra_image={{ deployment_vars[deployment_type].image }} + -P master_flavor={{ openstack_flavor["master"] }} + -P node_flavor={{ openstack_flavor["node"] }} + -P infra_flavor={{ openstack_flavor["infra"] }} + -P ssh_public_key="{{ openstack_ssh_public_key }}" + openshift-ansible-{{ cluster_id }}-stack' when: stack_show_result.rc == 1 - set_fact: heat_stack_action: 'stack-update' when: stack_show_result.rc == 0 - - name: Create or Update OpenStack Stack - command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} + - name: Update OpenStack Stack + command: 'heat stack-update -f {{ openstack_infra_heat_stack }} + -P key_pair={{ openstack_ssh_keypair }} -P cluster_id={{ cluster_id }} -P cidr={{ openstack_network_cidr }} -P dns_nameservers={{ openstack_network_dns | join(",") }} @@ -50,7 +67,7 @@ shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' register: stack_show_status_result until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 30 + retries: 300 delay: 1 failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] @@ -119,4 +136,12 @@ - include: update.yml +# Fix icmp reject iptables rules +# It should be solved in openshift-sdn but unfortunately it's not the case +# Mysterious +- name: Configuring Nodes for RBox + hosts: oo_nodes_to_config + roles: + - rbox-node + - include: list.yml diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index fd3d20800..6301d4fc0 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 +- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1 assert: that: - ansible_version | version_compare('1.8.0', 'ge') diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 7c4f45ce6..94d7879b2 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -3,7 +3,7 @@ {{ openshift.common.client_binary }} get node {{ item }} register: omd_get_node until: omd_get_node.rc == 0 - retries: 10 + retries: 20 delay: 5 with_items: openshift_nodes diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e8cc499c0..96cd96315 100644 --- 
a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -22,7 +22,7 @@ deployment_type: "{{ openshift_deployment_type }}" - role: node local_facts: - labels: "{{ openshift_node_labels | default(none) }}" + labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default() ) }}" annotations: "{{ openshift_node_annotations | default(none) }}" registry_url: "{{ oreg_url | default(none) }}" debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" -- cgit v1.2.3 From f1ee60e1781735486c57a15c83104c7228a158cc Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 7 Oct 2015 14:25:02 -0400 Subject: Removed io1 type for gp2 --- .../adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index c9ae923bb..82870664c 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -27,9 +27,8 @@ gather_facts: no vars: - cli_volume_type: io1 + cli_volume_type: gp2 cli_volume_size: 30 - cli_volume_iops: "{{ 30 * cli_volume_size }}" pre_tasks: - fail: @@ -104,7 +103,6 @@ volume_size: "{{ cli_volume_size | default(30, True)}}" volume_type: "{{ cli_volume_type }}" device_name: /dev/xvdb - iops: "{{ 30 * cli_volume_size }}" register: vol - debug: var=vol -- cgit v1.2.3 From b6fe5ba80bb131543bd09374df88821c8754da64 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 7 Oct 2015 14:55:43 -0400 Subject: Removing the last step as it will fail. --- .../adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 7 ------- 1 file changed, 7 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index 82870664c..b6a2d2f26 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -140,10 +140,3 @@ - debug: var=dockerstart - - name: Wait for docker to stabilize - pause: - seconds: 30 - - # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support - - name: update zabbix docker items - command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py -- cgit v1.2.3 From a8171a639bd4500f30e72233587e9f6335202438 Mon Sep 17 00:00:00 2001 From: Chengcheng Mu Date: Fri, 9 Oct 2015 16:27:25 +0200 Subject: Adding second param. 
true to many default filters --- playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml | 2 +- playbooks/gce/openshift-cluster/list.yml | 2 +- playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 6 +++--- playbooks/gce/openshift-cluster/terminate.yml | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml index 0fd53eb7d..01d70a1a6 100644 --- a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml @@ -10,6 +10,6 @@ with_sequence: count={{ number_infra }} - set_fact: - infra_names: "{{ infra_names_output.results | default([]) + infra_names: "{{ infra_names_output.results | default([], true) | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index f5f89baf0..53b2b9a5e 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -14,7 +14,7 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) + with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) - name: List instance(s) hosts: oo_list_hosts diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index b07982305..e300b5b5a 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -32,11 +32,11 @@ groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data | default([]) + with_items: gce.instance_data | default([], true) - name: Wait for ssh wait_for: port=22 host={{ item.name }} - with_items: gce.instance_data | default([]) + with_items: gce.instance_data | default([], true) - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" @@ -44,4 +44,4 @@ until: result.rc == 0 retries: 30 delay: 5 - with_items: gce.instance_data | default([]) + with_items: gce.instance_data | default([], true) diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index f705745d9..e20e0a8bc 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -12,7 +12,7 @@ groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) + with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | 
difference(groups.status_terminated | default([], true)) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -42,7 +42,7 @@ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" zone: "{{ lookup('env', 'zone') }}" - with_items: groups['oo_hosts_to_terminate'] | default([]) + with_items: groups['oo_hosts_to_terminate'] | default([], true) when: item is defined #- include: ../openshift-node/terminate.yml -- cgit v1.2.3 From dc9e087205b7ce4b843a40f5d0046b5ad6634a70 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Wed, 7 Oct 2015 10:52:15 -0400 Subject: Add `oadm reconcile-cluster-role-bindings` to upgrade playbook. Switch to version_compare filter for conditionals. --- playbooks/adhoc/upgrades/upgrade.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml index e666f0472..b43ab7607 100644 --- a/playbooks/adhoc/upgrades/upgrade.yml +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -40,7 +40,7 @@ hosts: oo_first_master tasks: fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later - when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2) + when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') ) - name: Update cluster policy hosts: oo_first_master @@ -50,6 +50,19 @@ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --confirm +- name: Update cluster policy bindings + hosts: oo_first_master + tasks: + - name: oadm policy reconcile-cluster-role-bindings --confirm + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>') + - name: Upgrade default router hosts: oo_first_master vars: -- cgit v1.2.3 From 17d55a94ed60e7e89fc704a80e61783d74c6af2f Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Wed, 14 Oct 2015 09:52:31 -0400 Subject: moved the timeout to 12 hours in the docker vg move --- playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index ef9b45abd..63d473146 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -172,7 +172,7 @@ - name: pvmove onto new volume command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1" - async: 3600 + async: 43200 poll: 10 - name: Remove the old docker drive from the volume group -- cgit v1.2.3 From ccf9acd9dad5c9dc2e1640f417a9109c9042a689 Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Mon, 12 Oct 2015 15:29:35 -0400 Subject: Fixed GCE playbooks so that they're more like the AWS playbooks. 
Namely the GCE playbooks now: - Create infra nodes - Correctly label nodes in OpenShift - Setup masters as nodes as well (needed for sdn) - Removed set_infra_launch_facts_tasks.yml as it's not used anymore. --- .../set_infra_launch_facts_tasks.yml | 15 ----------- playbooks/gce/openshift-cluster/config.yml | 1 + playbooks/gce/openshift-cluster/launch.yml | 31 +++++++++++----------- .../openshift-cluster/tasks/launch_instances.yml | 21 ++++++++++++--- 4 files changed, 35 insertions(+), 33 deletions(-) delete mode 100644 playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml (limited to 'playbooks') diff --git a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml deleted file mode 100644 index 01d70a1a6..000000000 --- a/playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- set_fact: k8s_type=infra -- set_fact: sub_host_type="{{ type }}" -- set_fact: number_infra="{{ count }}" - -- name: Generate infra instance names(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" - register: infra_names_output - with_sequence: count={{ number_infra }} - -- set_fact: - infra_names: "{{ infra_names_output.results | default([], true) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 7bd3f1a56..6ca4f7395 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -20,6 +20,7 @@ g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 94e57fe4e..c22b897d5 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -28,21 +28,22 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" -# - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml -# vars: -# type: "infra" -# count: "{{ num_infra }}" -# - include: tasks/launch_instances.yml -# vars: -# instances: "{{ infra_names }}" -# cluster: "{{ cluster_id }}" -# type: "{{ k8s_type }}" -# g_sub_host_type: "{{ sub_host_type }}" -# -# - set_fact: -# a_infra: "{{ infra_names[0] }}" -# - add_host: name={{ a_infra }} groups=service_master -# + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + vars: + type: "infra" + count: "{{ num_infra }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + + - add_host: + name: "{{ master_names.0 }}" + groups: service_master + when: master_names is defined and master_names.0 is defined + - include: update.yml # #- name: Deploy OpenShift Services diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index e300b5b5a..c428cb465 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -20,22 +20,37 @@ - 
host-type-{{ type }} - sub-host-type-{{ g_sub_host_type }} - env-host-type-{{ cluster }}-openshift-{{ type }} - when: instances |length > 0 + when: instances |length > 0 register: gce +- set_fact: + node_label: + # There doesn't seem to be a way to get the region directly, so parse it out of the zone. + region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}" + type: "{{ g_sub_host_type }}" + when: instances |length > 0 and type == "node" + +- set_fact: + node_label: + # There doesn't seem to be a way to get the region directly, so parse it out of the zone. + region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}" + type: "{{ type }}" + when: instances |length > 0 and type != "node" + - name: Add new instances to groups and set variables needed add_host: hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" + openshift_node_labels: "{{ node_label }}" with_items: gce.instance_data | default([], true) - name: Wait for ssh - wait_for: port=22 host={{ item.name }} + wait_for: port=22 host={{ item.public_ip }} with_items: gce.instance_data | default([], true) - name: Wait for user setup -- cgit v1.2.3 From ef1fef97dee3ae291344478d987108836e9a664d Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Thu, 15 Oct 2015 14:16:38 -0400 Subject: Removed AWS keys from command line, and substituted with environment variable lookup. --- playbooks/adhoc/s3_registry/s3_registry.j2 | 4 ++-- playbooks/adhoc/s3_registry/s3_registry.yml | 13 ++++++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 index 026b24456..acfa89515 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.j2 +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -7,8 +7,8 @@ storage: cache: layerinfo: inmemory s3: - accesskey: {{ accesskey }} - secretkey: {{ secretkey }} + accesskey: {{ aws_access_key }} + secretkey: {{ aws_secret_key }} region: us-east-1 bucket: {{ clusterid }}-docker encrypt: true diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 30b873db3..92be64e17 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -10,11 +10,22 @@ remote_user: root gather_facts: False + vars: + aws_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" + aws_secret_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" tasks: + - name: Check for AWS creds + fail: + msg: "Couldn't find {{ item }} creds in ENV" + when: "{{ item }} == ''" + with_items: + - aws_access_key + - aws_secret_key + - name: Create S3 bucket local_action: - module: s3 bucket="{{ clusterid }}-docker" mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }} + module: s3 bucket="{{ clusterid }}-docker" mode=create - name: Generate docker registry config template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 -- cgit v1.2.3 From 14ae81a5c18a6cdf5bf00ada9eeec21a82cd982e Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Thu, 15 Oct 2015 14:33:58 -0400 Subject: Update example to remove passing in aws creds on command line. 
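This moves the S3 credentials out of `-e` extra vars on the command line, where they would end up in shell history and process listings, and into environment variables read at run time. A minimal sketch of the idiom, mirroring the variable names used in the diff that follows (with the access/secret mapping as corrected by the later "Fix typos on env vars" commit in this series):

    vars:
      # Empty string when the variable is not exported on the controller.
      aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
      aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
    tasks:
      - name: Fail early when either credential is missing
        fail:
          msg: "Couldn't find {{ item }} creds in ENV"
        when: "{{ item }} == ''"
        with_items:
          - aws_access_key
          - aws_secret_key

The caller exports the two variables before running the playbook, and only clusterid remains on the command line.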
--- playbooks/adhoc/s3_registry/s3_registry.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 92be64e17..d1546b6fa 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -1,7 +1,7 @@ --- # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage. # Usage: -# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster" +# ansible-playbook s3_registry.yml -e clusterid="mycluster" # # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. # The 'clusterid' is the short name of your cluster. -- cgit v1.2.3 From ba7bf4f22ac6a7756a6a8ce6c28276667a968742 Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Mon, 19 Oct 2015 16:10:00 -0400 Subject: added a generic playbook (ops-docker-loopback-to-direct-lvm.yml) to convert a host from loop back to direct-lvm docker storage. --- .../ops-docker-loopback-to-direct-lvm.yml | 104 +++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100755 playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml new file mode 100755 index 000000000..614b2537a --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml @@ -0,0 +1,104 @@ +#!/usr/bin/ansible-playbook +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker). +# +# It requires the block device to be already provisioned and attached to the host. This is a generic playbook, +# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory. +# +# To run: +# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host= -e cli_docker_device= +# +# Example: +# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb +# +# Notes: +# * This will remove /var/lib/docker! +# * You may need to re-deploy docker images after this is run (like monitoring) + +- name: Fix docker to have a provisioned iops drive + hosts: "{{ cli_name }}" + user: root + connection: ssh + gather_facts: no + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_docker_device + + - name: start docker + service: + name: docker + state: started + + - name: Determine if loopback + shell: docker info | grep 'Data file:.*loop' + register: loop_device_check + ignore_errors: yes + + - debug: + var: loop_device_check + + - name: fail if we don't detect loopback + fail: + msg: loopback not detected! Please investigate manually. 
+ when: loop_device_check.rc == 1 + + - name: stop zagg client monitoring container + service: + name: oso-rhel7-zagg-client + state: stopped + ignore_errors: yes + + - name: stop pcp client monitoring container + service: + name: oso-f22-host-monitoring + state: stopped + ignore_errors: yes + + - name: "check to see if {{ cli_docker_device }} exists" + command: "test -e {{ cli_docker_device }}" + register: docker_dev_check + ignore_errors: yes + + - debug: var=docker_dev_check + + - name: "fail if {{ cli_docker_device }} doesn't exist" + fail: + msg: "{{ cli_docker_device }} doesn't exist. Please investigate" + when: docker_dev_check.rc != 0 + + - name: stop docker + service: + name: docker + state: stopped + + - name: delete /var/lib/docker + command: rm -rf /var/lib/docker + + - name: remove /var/lib/docker + command: rm -rf /var/lib/docker + + - name: copy the docker-storage-setup config file + copy: + content: > + DEVS={{ cli_docker_device }} + VG=docker_vg + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0664 + + - name: docker storage setup + command: docker-storage-setup + register: setup_output + + - debug: var=setup_output + + - name: start docker + command: systemctl start docker.service + register: dockerstart + + - debug: var=dockerstart -- cgit v1.2.3 From 538950fd7650ad09523553eff634b4d5a672edec Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Mon, 19 Oct 2015 17:36:58 -0400 Subject: Fix typos on env vars. --- playbooks/adhoc/s3_registry/s3_registry.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index d1546b6fa..5dc1abf17 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -11,8 +11,8 @@ gather_facts: False vars: - aws_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" - aws_secret_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" + aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" + aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" tasks: - name: Check for AWS creds -- cgit v1.2.3 From 14598f3a9cd7998a35a127832349f3ec57f4684b Mon Sep 17 00:00:00 2001 From: Jaroslav Henner Date: Mon, 19 Oct 2015 18:04:54 +0200 Subject: Use runcmd to restart network. Using bootcmd in cloud-config lead to restarts prior to starting the systemd-hostnamed, which was probable cause of the failure when DHCP client was failing to send the hostname, and subsequently, the ansible-opnshift was not able to identify the VM among the others when checking DHCP leases. 
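In cloud-init terms the fix is purely about ordering: bootcmd entries run very early and on every boot, before most services (systemd-hostnamed included) are up, while runcmd entries are written out by cloud-init and executed once, near the end of first boot. An illustrative cloud-config fragment, not taken from the patch itself:

    #cloud-config
    bootcmd:
      # Runs early on every boot, before networking and hostnamed are fully up.
      - echo "too early for the DHCP_HOSTNAME edit"
    runcmd:
      # Runs once, late in first boot, after the base services have started.
      - echo "safe point for editing ifcfg-eth0 and restarting the network"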
The failure looked like: following 10:17:31 failed: [localhost] => {"attempts": 60, "changed": true, "cmd": "virsh -c qemu:///system net-dhcp-leases openshift-ansible | egrep -c 'experiment-node-compute-453d0|experiment-node-compute-61e16'", "delta": "0:00:00.033061", "end": "2015-10-19 10:17:31.409434", "failed": true, "rc": 0, "start": "2015-10-19 10:17:31.376373", "warnings": []} 10:17:31 stdout: 1 10:17:31 msg: Task failed as maximum retries was encountered --- playbooks/libvirt/openshift-cluster/templates/user-data | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data index 77b788109..eacae7c7e 100644 --- a/playbooks/libvirt/openshift-cluster/templates/user-data +++ b/playbooks/libvirt/openshift-cluster/templates/user-data @@ -19,5 +19,5 @@ system_info: ssh_authorized_keys: - {{ lookup('file', '~/.ssh/id_rsa.pub') }} -bootcmd: +runcmd: - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart -- cgit v1.2.3 From 2679d760c8abbb1140f82582329dfdc8be835a76 Mon Sep 17 00:00:00 2001 From: Jaroslav Henner Date: Sat, 17 Oct 2015 22:40:40 +0200 Subject: Increase sleep when waiting for IP. It was timeouting on slower hardware. --- playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 2a0c90b46..4b91c6da8 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -64,7 +64,7 @@ register: nb_allocated_ips until: nb_allocated_ips.stdout == '{{ instances | length }}' retries: 60 - delay: 1 + delay: 3 when: instances | length != 0 - name: Collect IP addresses of the VMs -- cgit v1.2.3 From 94c2ba099a87c3dc6b87f9bf916c1282a7266e45 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 8 Oct 2015 17:52:01 -0400 Subject: Adjust the logic as to when examples are deployed --- playbooks/common/openshift-master/config.yml | 3 +-- roles/openshift_examples/defaults/main.yml | 6 +++--- roles/openshift_examples/tasks/main.yml | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 64cf7a65b..769bb2c6d 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -216,8 +216,7 @@ roles: - role: openshift_master_cluster when: openshift_master_ha | bool - - role: openshift_examples - when: deployment_type in ['enterprise','openshift-enterprise','origin'] + - openshift_examples - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 7d4f100e3..caab9b8d6 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -1,9 +1,9 @@ --- # By default install rhel and xpaas streams on enterprise installs -openshift_examples_load_centos: "{{ openshift_deployment_type != 'enterprise' }}" -openshift_examples_load_rhel: "{{ 
openshift_deployment_type == 'enterprise' }}" +openshift_examples_load_centos: "{{ openshift_deployment_type not in ['enterprise','openshift-enterprise','atomic-enterprise'] }}" +openshift_examples_load_rhel: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise'] }}" openshift_examples_load_db_templates: true -openshift_examples_load_xpaas: "{{ openshift_deployment_type == 'enterprise' }}" +openshift_examples_load_xpaas: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise'] }}" openshift_examples_load_quickstarts: true examples_base: /usr/share/openshift/examples diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 3a829a4c6..ef98237cd 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -9,7 +9,7 @@ command: > {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ rhel_image_streams }} when: openshift_examples_load_rhel - register: oex_import_rhel_streams + register: oex_import_rhel_streams | bool failed_when: "'already exists' not in oex_import_rhel_streams.stderr and oex_import_rhel_streams.rc != 0" changed_when: false @@ -32,7 +32,7 @@ - name: Import quickstart-templates command: > {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }} - when: openshift_examples_load_quickstarts + when: openshift_examples_load_quickstarts | bool register: oex_import_quickstarts failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" changed_when: false -- cgit v1.2.3 From 8691cd2947146a24237fadc443eb02acf805a606 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Fri, 11 Sep 2015 13:13:17 -0700 Subject: Support HA or single router, and start work on registry --- inventory/byo/hosts.example | 4 +++ playbooks/adhoc/s3_registry/s3_registry.yml | 16 +++++++++--- playbooks/aws/openshift-cluster/launch.yml | 5 ---- .../common/openshift-cluster/create_services.yml | 8 ------ playbooks/common/openshift-master/config.yml | 7 ++++++ roles/openshift_facts/library/openshift_facts.py | 29 +++++++++++++++++++++- roles/openshift_master/tasks/main.yml | 3 +++ roles/openshift_registry/tasks/main.yml | 11 +++++--- roles/openshift_router/tasks/main.yml | 11 +++++--- 9 files changed, 69 insertions(+), 25 deletions(-) delete mode 100644 playbooks/common/openshift-cluster/create_services.yml (limited to 'playbooks') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index f554cc660..6b366cf87 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -75,6 +75,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default project node selector #osm_default_node_selector='region=primary' +# default selectors for router and registry services +# openshift_router_selector='region=infra' +# openshift_registry_selector='region=infra' + # set RPM version for debugging purposes #openshift_pkg_version=-3.0.0.0 diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 5dc1abf17..4dcef1a42 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -6,13 +6,14 @@ # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. # The 'clusterid' is the short name of your cluster. 
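The openshift_router_selector and openshift_registry_selector defaults added to the inventory example earlier in this commit only take effect if at least one node carries a matching label. A group_vars-style sketch of the pairing, not taken from this patch; region=infra is the conventional value the diff suggests, and the label structure matches what openshift_node_labels carries elsewhere in this series:

    # Selector handed to `oadm router` / `oadm registry` by the master-side roles.
    openshift_router_selector: 'region=infra'
    openshift_registry_selector: 'region=infra'

    # Labels a node must expose for those pods to be schedulable on it.
    openshift_node_labels:
      region: infra
      zone: default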
-- hosts: security_group_{{ clusterid }}_master +- hosts: tag_env-host-type_{{ clusterid }}-openshift-master remote_user: root gather_facts: False vars: - aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" - aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" + aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}" + aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}" + tasks: - name: Check for AWS creds @@ -23,10 +24,16 @@ - aws_access_key - aws_secret_key + - name: Scale down registry + command: oc scale --replicas=0 dc/docker-registry + - name: Create S3 bucket local_action: module: s3 bucket="{{ clusterid }}-docker" mode=create + - name: Set up registry environment variable + command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml + - name: Generate docker registry config template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 @@ -54,6 +61,9 @@ command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry when: "'dockersecrets' not in dc.stdout" + - name: Wait for deployment config to take effect before scaling up + pause: seconds=30 + - name: Scale up registry command: oc scale --replicas=1 dc/docker-registry diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index a89275597..786918929 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -55,9 +55,4 @@ when: master_names is defined and master_names.0 is defined - include: update.yml - -- include: ../../common/openshift-cluster/create_services.yml - vars: - g_svc_master: "{{ service_master }}" - - include: list.yml diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml deleted file mode 100644 index e70709d19..000000000 --- a/playbooks/common/openshift-cluster/create_services.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Deploy OpenShift Services - hosts: "{{ g_svc_master }}" - connection: ssh - gather_facts: yes - roles: - - openshift_registry - - openshift_router diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 14ec82e85..678e1c2d5 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -254,3 +254,10 @@ roles: - openshift_serviceaccounts + +- name: Create services + hosts: oo_first_master + + roles: + - openshift_router +# - openshift_registry diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 69bb49c9b..6a32b24aa 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1,6 +1,9 @@ -#!/usr/bin/python +#!/usr/bin/python # pylint: disable=too-many-lines # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 +# Reason: Disable pylint too-many-lines because we don't want to split up this file. +# Status: Permanently disabled to keep this module as self-contained as possible. 
+ """Ansible module for retrieving and setting openshift related facts""" DOCUMENTATION = ''' @@ -318,6 +321,29 @@ def set_node_schedulability(facts): facts['node']['schedulable'] = True return facts +def set_master_selectors(facts): + """ Set selectors facts if not already present in facts dict + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the generated selectors + facts if they were not already present + + """ + if 'master' in facts: + if 'infra_nodes' in facts['master']: + deployment_type = facts['common']['deployment_type'] + if deployment_type == 'online': + selector = "type=infra" + else: + selector = "region=infra" + + if 'router_selector' not in facts['master']: + facts['master']['router_selector'] = selector + if 'registry_selector' not in facts['master']: + facts['master']['registry_selector'] = selector + return facts + def set_metrics_facts_if_unset(facts): """ Set cluster metrics facts if not already present in facts dict dict: the facts dict updated with the generated cluster metrics facts if @@ -782,6 +808,7 @@ class OpenShiftFacts(object): facts = set_url_facts_if_unset(facts) facts = set_fluentd_facts_if_unset(facts) facts = set_node_schedulability(facts) + facts = set_master_selectors(facts) facts = set_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = set_sdn_facts_if_unset(facts) diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 73c04cb08..4dcab31d1 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -52,8 +52,11 @@ default_subdomain: "{{ osm_default_subdomain | default(None) }}" custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}" default_node_selector: "{{ osm_default_node_selector | default(None) }}" + router_selector: "{{ openshift_router_selector | default(None) }}" + registry_selector: "{{ openshift_registry_selector | default(None) }}" api_server_args: "{{ osm_api_server_args | default(None) }}" controller_args: "{{ osm_controller_args | default(None) }}" + infra_nodes: "{{ num_infra | default(None) }}" - name: Install Master package yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml index 29387d7d5..749eea5c0 100644 --- a/roles/openshift_registry/tasks/main.yml +++ b/roles/openshift_registry/tasks/main.yml @@ -1,11 +1,14 @@ --- -- set_fact: _oreg_images="--images={{ oreg_url|quote }}" - when: oreg_url is defined +# This role is unused until we add options for configuring the backend storage + +- set_fact: _oreg_images="--images='{{ openshift.master.registry_url }}'" + +- set_fact: _oreg_selector="--selector='{{ openshift.master.registry_selector }}'" - name: Deploy OpenShift Registry command: > {{ openshift.common.admin_binary }} registry - --create - --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images|default() }} + --create --service-account=registry {{ _oreg_selector }} + --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images }} register: _oreg_results changed_when: "'service exists' not in _oreg_results.stdout" diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml index 929177262..b88b020fe 100644 --- a/roles/openshift_router/tasks/main.yml +++ b/roles/openshift_router/tasks/main.yml @@ -1,11 +1,14 @@ --- -- set_fact: 
_ortr_images="--images={{ oreg_url|quote }}" - when: oreg_url is defined + +- set_fact: _ortr_images="--images='{{ openshift.master.registry_url }}'" + +- set_fact: _ortr_selector="--selector='{{ openshift.master.router_selector }}'" - name: Deploy OpenShift Router command: > {{ openshift.common.admin_binary }} router - --create - --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images|default() }} + --create --replicas={{ num_infra }} + --service-account=router {{ _ortr_selector }} + --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images }} register: _ortr_results changed_when: "'service exists' not in _ortr_results.stdout" -- cgit v1.2.3 From a921f8c296467cf72b0d273d8891dcd2f2570bea Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Wed, 21 Oct 2015 17:25:51 -0400 Subject: Fix yaml tabbing --- playbooks/adhoc/upgrades/upgrade.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml index b43ab7607..56a1df860 100644 --- a/playbooks/adhoc/upgrades/upgrade.yml +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -61,7 +61,7 @@ --exclude-groups=system:unauthenticated --exclude-users=system:anonymous --additive-only=true --confirm - when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>') + when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>') - name: Upgrade default router hosts: oo_first_master -- cgit v1.2.3 From d121d8c208d4b5ea974f2f9d1ecf529f8fca7f44 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 22 Oct 2015 01:11:25 -0400 Subject: Conditionally include openshift_router role. --- playbooks/common/openshift-master/config.yml | 6 +++--- roles/openshift_router/tasks/main.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 678e1c2d5..f4bf0e62c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -257,7 +257,7 @@ - name: Create services hosts: oo_first_master - roles: - - openshift_router -# - openshift_registry + - role: openshift_router + when: openshift.master.infra_nodes is defined + #- role: openshift_registry diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml index b88b020fe..498a65127 100644 --- a/roles/openshift_router/tasks/main.yml +++ b/roles/openshift_router/tasks/main.yml @@ -7,7 +7,7 @@ - name: Deploy OpenShift Router command: > {{ openshift.common.admin_binary }} router - --create --replicas={{ num_infra }} + --create --replicas={{ openshift.master.infra_nodes }} --service-account=router {{ _ortr_selector }} --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images }} register: _ortr_results -- cgit v1.2.3 From 0c5e2522e44aee9309336049633eb82531f997b6 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Tue, 20 Oct 2015 14:38:22 -0400 Subject: Improvements to uninstallation playbook This is related to https://trello.com/c/314nwSvt/58-3-uninstall-playbook The original atomic_openshift_tutorial_reset.yml now calls the uninstall playbook for most parts. 
All the originally functionally is still intact. The main differences between the two playbooks is that the uninstall playbook is careful only to delete content that ansible originally installed. --- .../adhoc/atomic_openshift_tutorial_reset.yml | 77 +------------- playbooks/adhoc/uninstall.yml | 111 +++++++++++++++++++++ 2 files changed, 114 insertions(+), 74 deletions(-) create mode 100644 playbooks/adhoc/uninstall.yml (limited to 'playbooks') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 54d3ea278..c14d08e87 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -1,6 +1,9 @@ # This deletes *ALL* Docker images, and uninstalls OpenShift and # Atomic Enterprise RPMs. It is primarily intended for use # with the tutorial as well as for developers to reset state. +# +--- +- include: uninstall.yml - hosts: - OSEv3:children @@ -8,59 +11,6 @@ sudo: yes tasks: - - service: name={{ item }} state=stopped - with_items: - - openvswitch - - origin-master - - origin-node - - atomic-openshift-master - - atomic-openshift-node - - openshift-master - - openshift-node - - atomic-enterprise-master - - atomic-enterprise-node - - etcd - - - yum: name={{ item }} state=absent - with_items: - - openvswitch - - etcd - - origin - - origin-master - - origin-node - - origin-sdn-ovs - - tuned-profiles-origin-node - - atomic-openshift - - atomic-openshift-master - - atomic-openshift-node - - atomic-openshift-sdn-ovs - - tuned-profiles-atomic-openshift-node - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - tuned-profiles-atomic-enterprise-node - - openshift - - openshift-master - - openshift-node - - openshift-sdn-ovs - - tuned-profiles-openshift-node - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - shell: docker ps -a -q | xargs docker stop changed_when: False failed_when: False @@ -73,27 +23,6 @@ changed_when: False failed_when: False - - file: path={{ item }} state=absent - with_items: - - /etc/openshift-sdn - - /root/.kube - - /etc/origin - - /etc/atomic-enterprise - - /etc/openshift - - /var/lib/origin - - /var/lib/openshift - - /var/lib/atomic-enterprise - - /etc/sysconfig/origin-master - - /etc/sysconfig/origin-node - - /etc/sysconfig/atomic-openshift-master - - /etc/sysconfig/atomic-openshift-node - - /etc/sysconfig/openshift-master - - /etc/sysconfig/openshift-node - - /etc/sysconfig/atomic-enterprise-master - - /etc/sysconfig/atomic-enterprise-node - - /etc/etcd - - /var/lib/etcd - - user: name={{ item }} state=absent remove=yes with_items: - alice diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml new file mode 100644 index 000000000..1a3c56d95 --- /dev/null +++ b/playbooks/adhoc/uninstall.yml @@ -0,0 +1,111 @@ +# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift +# Enterprise content installed by ansible. 
This includes: +# +# configuration +# containers +# example templates and imagestreams +# images +# RPMs +--- +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - openvswitch + - origin-master + - origin-node + - atomic-openshift-master + - atomic-openshift-node + - openshift-master + - openshift-node + - atomic-enterprise-master + - atomic-enterprise-node + - etcd + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - etcd + - origin + - origin-master + - origin-node + - origin-sdn-ovs + - tuned-profiles-origin-node + - atomic-openshift + - atomic-openshift-master + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - tuned-profiles-atomic-openshift-node + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - openshift + - openshift-master + - openshift-node + - openshift-sdn-ovs + - tuned-profiles-openshift-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node + changed_when: False + failed_when: False + with_items: + - openshift-enterprise + - atomic-enterprise + - origin + + - shell: docker images | grep {{ item }} | awk '{ print $3 }' + changed_when: False + failed_when: False + register: images_to_delete + with_items: + - registry.access.redhat.com/openshift3 + - registry.access.redhat.com/aep3 + - docker.io/openshift + + - shell: "docker rmi {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ images_to_delete.results }}" + + - file: path={{ item }} state=absent + with_items: + - /etc/atomic-enterprise + - /etc/etcd + - /etc/openshift + - /etc/openshift-sdn + - /etc/origin + - /etc/sysconfig/atomic-enterprise-master + - /etc/sysconfig/atomic-enterprise-node + - /etc/sysconfig/atomic-openshift-master + - /etc/sysconfig/atomic-openshift-node + - /etc/sysconfig/openshift-master + - /etc/sysconfig/openshift-node + - /etc/sysconfig/origin-master + - /etc/sysconfig/origin-node + - /root/.kube + - /usr/share/openshift/examples + - /var/lib/atomic-enterprise + - /var/lib/etcd + - /var/lib/openshift + - /var/lib/origin -- cgit v1.2.3 From 1bf7844f61785e717f8563d03994841d0a71ac28 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 21 Oct 2015 10:26:45 -0400 Subject: Adding *master-api and *master-controllers to the list of units to stop (also sorted the various lists alphabetically) --- playbooks/adhoc/uninstall.yml | 44 ++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 1a3c56d95..ecd858e68 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -15,41 +15,47 @@ tasks: - service: name={{ item }} state=stopped with_items: - - openvswitch - - origin-master - - origin-node + - atomic-enterprise-master + - atomic-enterprise-node - atomic-openshift-master + - 
atomic-openshift-master-api + - atomic-openshift-master-controllers - atomic-openshift-node + - etcd - openshift-master + - openshift-master-api + - openshift-master-controllers - openshift-node - - atomic-enterprise-master - - atomic-enterprise-node - - etcd - - - yum: name={{ item }} state=absent - with_items: - openvswitch - - etcd - - origin - origin-master + - origin-master-api + - origin-master-controllers - origin-node - - origin-sdn-ovs - - tuned-profiles-origin-node - - atomic-openshift - - atomic-openshift-master - - atomic-openshift-node - - atomic-openshift-sdn-ovs - - tuned-profiles-atomic-openshift-node + + - yum: name={{ item }} state=absent + with_items: - atomic-enterprise - atomic-enterprise-master - atomic-enterprise-node - atomic-enterprise-sdn-ovs - - tuned-profiles-atomic-enterprise-node + - atomic-openshift + - atomic-openshift-master + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - etcd - openshift - openshift-master - openshift-node - openshift-sdn-ovs + - openvswitch + - origin + - origin-master + - origin-node + - origin-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - tuned-profiles-atomic-openshift-node - tuned-profiles-openshift-node + - tuned-profiles-origin-node - shell: systemctl reset-failed changed_when: False -- cgit v1.2.3 From 1b0c615d3c1c7dfd6484ba399763282586475599 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 21 Oct 2015 11:31:18 -0400 Subject: Removing the openshift facts --- playbooks/adhoc/uninstall.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index ecd858e68..3e865706d 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -96,6 +96,7 @@ - file: path={{ item }} state=absent with_items: + - /etc/ansible/facts.d/openshift.fact - /etc/atomic-enterprise - /etc/etcd - /etc/openshift -- cgit v1.2.3 From 6ada8b8eb4ebe60ba18226caa5b4812b26161379 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 21 Oct 2015 15:56:24 -0400 Subject: Deleting exited openshift containers and some other minor touch ups --- playbooks/adhoc/uninstall.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 3e865706d..40db668da 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -39,6 +39,7 @@ - atomic-enterprise-node - atomic-enterprise-sdn-ovs - atomic-openshift + - atomic-openshift-clients - atomic-openshift-master - atomic-openshift-node - atomic-openshift-sdn-ovs @@ -46,6 +47,7 @@ - openshift - openshift-master - openshift-node + - openshift-sdn - openshift-sdn-ovs - openvswitch - origin @@ -80,6 +82,20 @@ - atomic-enterprise - origin + - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}' + changed_when: False + failed_when: False + register: exited_containers_to_delete + with_items: + - aep3/aep + - openshift3/ose + - openshift/origin + + - shell: "docker rm {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ exited_containers_to_delete.results }}" + - shell: docker images | grep {{ item }} | awk '{ print $3 }' changed_when: False failed_when: False @@ -89,7 +105,7 @@ - registry.access.redhat.com/aep3 - docker.io/openshift - - shell: "docker rmi {{ item.stdout_lines | join(' ') }}" + - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" changed_when: False failed_when: False with_items: 
"{{ images_to_delete.results }}" -- cgit v1.2.3 From 3e44d3aa6d35c62c57c102f5a8fec4bf86d2a1b5 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 21 Oct 2015 13:41:56 -0400 Subject: Fix test and workaround for rpm generated configs - fixed inconcistency in naming for rpm generated config test - refactoring to fix logic after the ha master refactoring had broken the previous steps --- playbooks/common/openshift-master/config.yml | 15 +++++++++++++++ roles/openshift_master/tasks/main.yml | 14 -------------- 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 0d78eca30..0a3fe90e1 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -2,6 +2,21 @@ - name: Set master facts and determine if external etcd certs need to be generated hosts: oo_masters_to_config pre_tasks: + - name: Check for RPM generated config marker file .config_managed + stat: + path: /etc/origin/.config_managed + register: rpmgenerated_config + + - name: Remove RPM generated config files if present + file: + path: "/etc/origin/{{ item }}" + state: absent + when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise'] + with_items: + - master + - node + - .config_managed + - set_fact: openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" openshift_master_etcd_hosts: "{{ hostvars diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 4dcab31d1..a5c1a805c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -62,20 +62,6 @@ yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present register: install_result -- name: Check for RPM generated config marker file /etc/origin/.config_managed - stat: path=/etc/origin/.rpmgenerated - register: rpmgenerated_config - -- name: Remove RPM generated config files - file: - path: "{{ item }}" - state: absent - when: openshift.common.service_type in ['atomic-enterprise','openshift-enterprise'] and rpmgenerated_config.stat.exists == true - with_items: - - "{{ openshift.common.config_base }}/master" - - "{{ openshift.common.config_base }}/node" - - "{{ openshift.common.config_base }}/.rpmgenerated" - # TODO: These values need to be configurable - name: Set dns facts openshift_facts: -- cgit v1.2.3 From 7f5c403e144e6ef4d39bf7b11adb4c4a8976521c Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Wed, 21 Oct 2015 16:17:39 -0400 Subject: Add proxy client certs to master config. 
--- playbooks/adhoc/upgrades/upgrade.yml | 10 ++++++++++ playbooks/common/openshift-master/config.yml | 2 ++ roles/openshift_master/templates/master.yaml.v1.j2 | 3 +++ roles/openshift_master_ca/tasks/main.yml | 3 +-- 4 files changed, 16 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml index 56a1df860..ae1d0127c 100644 --- a/playbooks/adhoc/upgrades/upgrade.yml +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -1,4 +1,14 @@ --- +- name: Upgrade base package on masters + hosts: masters + roles: + - openshift_facts + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade base package + yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest + - name: Re-Run cluster configuration to apply latest configuration changes include: ../../common/openshift-cluster/config.yml vars: diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 0a3fe90e1..ecea608b2 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -137,6 +137,7 @@ openshift_master_certs_no_etcd: - admin.crt - master.kubelet-client.crt + - master.proxy-client.crt - master.server.crt - openshift-master.crt - openshift-registry.crt @@ -144,6 +145,7 @@ - etcd.server.crt openshift_master_certs_etcd: - master.etcd-client.crt + - set_fact: openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}" diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 6e45eaad7..72fdcf88d 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -74,6 +74,9 @@ kubernetesMasterConfig: masterCount: 1 masterIP: "" podEvictionTimeout: "" + proxyClientInfo: + certFile: master.proxy-client.crt + keyFile: master.proxy-client.key schedulerConfigFile: {{ openshift_master_scheduler_conf }} servicesNodePortRange: "" servicesSubnet: {{ openshift.master.portal_net }} diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index 5c9639ea5..cfd1ceabf 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -18,5 +18,4 @@ --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_master_config_dir }} --overwrite=false - args: - creates: "{{ openshift_master_config_dir }}/master.server.key" + when: master_certs_missing -- cgit v1.2.3 From 5aff702d10b79822098ca68f9ee3184be45775d7 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 22 Oct 2015 13:12:22 -0400 Subject: Don't include proxy client cert when <3.1 or <1.1 --- playbooks/common/openshift-master/config.yml | 10 +++++++--- roles/openshift_master_certificates/tasks/main.yml | 5 +++-- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index ecea608b2..47e568f06 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -133,11 +133,14 @@ - name: Determine if master certificates need to be generated hosts: oo_masters_to_config tasks: + - set_fact: + 
include_proxy_client_cert: "{{ (openshift.common.version | version_compare('1.0.6', '>')) if openshift.common.deployment_type == 'origin' else (openshift.common.version | version_compare('3.0.2', '>')) }}" + - set_fact: openshift_master_certs_no_etcd: - admin.crt - master.kubelet-client.crt - - master.proxy-client.crt + - "{{ 'master.proxy-client.crt' if include_proxy_client_cert else omit }}" - master.server.crt - openshift-master.crt - openshift-registry.crt @@ -155,9 +158,9 @@ with_items: openshift_master_certs register: g_master_cert_stat_result - set_fact: - master_certs_missing: "{{ g_master_cert_stat_result.results + master_certs_missing: "{{ False in (g_master_cert_stat_result.results | map(attribute='stat.exists') - | list | intersect([false])}}" + | list ) }}" master_cert_subdir: master-{{ openshift.common.hostname }} master_cert_config_dir: "{{ openshift.common.config_base }}/master" @@ -189,6 +192,7 @@ args: creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz" with_items: masters_needing_certs + - name: Retrieve the master cert tarball from the master fetch: src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz" diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 0d75a9eb3..87e8181c1 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -20,6 +20,8 @@ - admin.kubeconfig - master.kubelet-client.crt - master.kubelet-client.key + - "{{ 'master.proxy-client.crt' if openshift.master.include_proxy_client_cert else omit }}" + - "{{ 'master.proxy-client.key' if openshift.master.include_proxy_client_cert else omit }}" - openshift-master.crt - openshift-master.key - openshift-master.kubeconfig @@ -41,6 +43,5 @@ --public-master={{ item.openshift.master.public_api_url }} --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }} --overwrite=false - args: - creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt" + when: master_certs_missing with_items: masters_needing_certs -- cgit v1.2.3 From 7eefcf8a04251da4d10deb936273847d47ccb609 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 22 Oct 2015 16:48:24 -0400 Subject: Move version greater_than_fact into openshift_facts --- playbooks/common/openshift-master/config.yml | 5 +---- roles/openshift_facts/library/openshift_facts.py | 10 ++++++++-- roles/openshift_master_certificates/tasks/main.yml | 4 ++-- 3 files changed, 11 insertions(+), 8 deletions(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 47e568f06..1dec923fc 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -133,14 +133,11 @@ - name: Determine if master certificates need to be generated hosts: oo_masters_to_config tasks: - - set_fact: - include_proxy_client_cert: "{{ (openshift.common.version | version_compare('1.0.6', '>')) if openshift.common.deployment_type == 'origin' else (openshift.common.version | version_compare('3.0.2', '>')) }}" - - set_fact: openshift_master_certs_no_etcd: - admin.crt - master.kubelet-client.crt - - "{{ 'master.proxy-client.crt' if include_proxy_client_cert else omit }}" + - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" - master.server.crt - openshift-master.crt - openshift-registry.crt diff --git 
a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 3570de693..d0388e6fe 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -20,6 +20,7 @@ EXAMPLES = ''' import ConfigParser import copy import os +from ansible.runner.filter_plugins.core import version_compare from distutils.util import strtobool @@ -501,7 +502,12 @@ def set_deployment_facts_if_unset(facts): if deployment_type in ['enterprise', 'online']: data_dir = '/var/lib/openshift' facts['common']['data_dir'] = data_dir - facts['common']['version'] = get_openshift_version() + facts['common']['version'] = version = get_openshift_version() + if deployment_type == 'origin': + version_gt_3_1_or_1_1 = version_compare(version, '1.0.6', '>') + else: + version_gt_3_1_or_1_1 = version_compare(version, '3.0.2', '>') + facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 for role in ('master', 'node'): if role in facts: @@ -632,7 +638,7 @@ def get_openshift_version(): Returns: version: the current openshift version """ - version = '' + version = None if os.path.isfile('/usr/bin/openshift'): _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 87e8181c1..e4602337e 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -20,8 +20,8 @@ - admin.kubeconfig - master.kubelet-client.crt - master.kubelet-client.key - - "{{ 'master.proxy-client.crt' if openshift.master.include_proxy_client_cert else omit }}" - - "{{ 'master.proxy-client.key' if openshift.master.include_proxy_client_cert else omit }}" + - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" + - "{{ 'master.proxy-client.key' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" - openshift-master.crt - openshift-master.key - openshift-master.kubeconfig -- cgit v1.2.3 From a6c34115d3e06f502846b271a2fc88eb202f767e Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Sat, 24 Oct 2015 10:26:34 -0400 Subject: added docker info to the end of docker loop to direct lvm playbook. 
--- .../ops-docker-loopback-to-direct-lvm.yml | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml index 614b2537a..72fcd77b3 100755 --- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml @@ -97,8 +97,19 @@ - debug: var=setup_output + - name: extend the vg + command: lvextend -l 90%VG /dev/docker_vg/docker-pool + register: extend_output + + - debug: var=extend_output + - name: start docker - command: systemctl start docker.service - register: dockerstart + service: + name: docker + state: restarted + + - name: docker info + command: docker info + register: dockerinfo - - debug: var=dockerstart + - debug: var=dockerinfo -- cgit v1.2.3 From 7558c4e35e076704624fdffa347a08cf7f3a804f Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Tue, 27 Oct 2015 13:42:11 -0400 Subject: Adding uninstall support for Atomic Host --- playbooks/adhoc/uninstall.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 40db668da..8cc5b9406 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -1,6 +1,6 @@ # This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift # Enterprise content installed by ansible. This includes: -# +# # configuration # containers # example templates and imagestreams @@ -13,6 +13,14 @@ sudo: yes tasks: + - name: Detecting Operating System + shell: ls /run/ostree-booted + ignore_errors: yes + register: ostree_output + + - set_fact: + is_atomic: ostree_output.rc == 0 + - service: name={{ item }} state=stopped with_items: - atomic-enterprise-master @@ -33,6 +41,7 @@ - origin-node - yum: name={{ item }} state=absent + when: not is_atomic with_items: - atomic-enterprise - atomic-enterprise-master -- cgit v1.2.3 From d753108350bfa8c41ba7c57bcb870a4e303c5659 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 28 Oct 2015 10:22:18 -0400 Subject: The uninstall playbook should remove the kubeconfig for non-root installs --- playbooks/adhoc/uninstall.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'playbooks') diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 8cc5b9406..af2108690 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -136,6 +136,7 @@ - /etc/sysconfig/origin-master - /etc/sysconfig/origin-node - /root/.kube + - "~{{ ansible_ssh_user }}/.kube" - /usr/share/openshift/examples - /var/lib/atomic-enterprise - /var/lib/etcd -- cgit v1.2.3 From e0bd8bfa45d85832e1f619fbf91934dff0706c9c Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 28 Oct 2015 13:40:27 -0400 Subject: Bug fixes for the uninstall playbook 1) is_atomic wasn't being evaluated properly 2) the way we were detecting it was resulting in a confusion error message being displayed to the user in the case of RHEL 7 Server --- playbooks/adhoc/uninstall.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'playbooks') diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index af2108690..7d1544be8 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -16,10 +16,11 @@ - name: Detecting Operating System shell: ls 
/run/ostree-booted ignore_errors: yes + failed_when: false register: ostree_output - set_fact: - is_atomic: ostree_output.rc == 0 + is_atomic: "{{ ostree_output.rc == 0 }}" - service: name={{ item }} state=stopped with_items: @@ -41,7 +42,7 @@ - origin-node - yum: name={{ item }} state=absent - when: not is_atomic + when: not is_atomic | bool with_items: - atomic-enterprise - atomic-enterprise-master -- cgit v1.2.3
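The Atomic Host detection and its follow-up fix settle on the same pattern already applied to the openshift_examples conditions earlier in this series: values registered from a shell task are not booleans, so derived facts need an explicit Jinja2 expression and consumers need an explicit | bool cast. A condensed sketch of the logic as it stands after these patches (tasks taken from playbooks/adhoc/uninstall.yml; the play header and the full package list are omitted):

  - name: Detecting Operating System
    shell: ls /run/ostree-booted
    ignore_errors: yes
    failed_when: false
    register: ostree_output

  # rc is 0 only when /run/ostree-booted exists, i.e. on Atomic Host
  - set_fact:
      is_atomic: "{{ ostree_output.rc == 0 }}"

  # RPM removal only makes sense on non-Atomic hosts, where yum manages the packages
  - yum: name={{ item }} state=absent
    when: not is_atomic | bool
    with_items:
    - origin    # abridged; the playbook lists every master, node, sdn and tuned package

The playbook can also be run on its own against an existing inventory with something like ansible-playbook -i <inventory> playbooks/adhoc/uninstall.yml.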