Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_tower/tasks/main.yaml | 3
-rw-r--r--  roles/etcd/tasks/main.yml | 1
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 1
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 10
-rw-r--r--  roles/lib_zabbix/library/zbx_action.py | 538
-rw-r--r--  roles/lib_zabbix/library/zbx_application.py | 21
-rw-r--r--  roles/lib_zabbix/library/zbx_discoveryrule.py | 36
-rw-r--r--  roles/lib_zabbix/library/zbx_item.py | 134
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py | 67
-rw-r--r--  roles/lib_zabbix/library/zbx_trigger.py | 70
-rw-r--r--  roles/lib_zabbix/library/zbx_triggerprototype.py | 177
-rw-r--r--  roles/lib_zabbix/library/zbx_user.py | 2
-rw-r--r--  roles/lib_zabbix/library/zbx_user_media.py | 56
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 65
-rw-r--r--  roles/openshift_common/tasks/main.yml | 2
-rw-r--r--  roles/openshift_common/vars/main.yml | 2
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 2
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json | 4
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 12
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 133
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 18
-rw-r--r--  roles/openshift_master/README.md | 10
-rw-r--r--  roles/openshift_master/defaults/main.yml | 10
-rw-r--r--  roles/openshift_master/handlers/main.yml | 4
-rw-r--r--  roles/openshift_master/meta/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 65
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_master/templates/scheduler.json.j2 | 2
-rw-r--r--  roles/openshift_master/templates/v1_partials/oauthConfig.j2 | 1
-rw-r--r--  roles/openshift_master/vars/main.yml | 3
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 4
-rw-r--r--  roles/openshift_master_ca/vars/main.yml | 3
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_certificates/vars/main.yml | 4
-rw-r--r--  roles/openshift_master_cluster/tasks/configure.yml | 8
-rw-r--r--  roles/openshift_master_cluster/tasks/configure_deferred.yml | 4
-rw-r--r--  roles/openshift_node/README.md | 16
-rw-r--r--  roles/openshift_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 41
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 15
-rw-r--r--  roles/openshift_node/vars/main.yml | 3
-rw-r--r--  roles/openshift_node_certificates/README.md | 4
-rw-r--r--  roles/openshift_node_certificates/vars/main.yml | 6
-rw-r--r--  roles/openshift_registry/vars/main.yml | 3
-rw-r--r--  roles/openshift_repos/vars/main.yml | 7
-rw-r--r--  roles/openshift_router/vars/main.yml | 3
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 26
-rw-r--r--  roles/openshift_serviceaccounts/templates/serviceaccount.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/os_zabbix/tasks/main.yml | 55
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_agent.yml | 23
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_server.yml | 408
-rw-r--r--  roles/os_zabbix/vars/template_docker.yml | 89
-rw-r--r--  roles/os_zabbix/vars/template_heartbeat.yml | 3
-rw-r--r--  roles/os_zabbix/vars/template_host.yml | 27
-rw-r--r--  roles/os_zabbix/vars/template_master.yml | 27
-rw-r--r--  roles/os_zabbix/vars/template_node.yml | 27
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 58
-rw-r--r--  roles/os_zabbix/vars/template_openshift_node.yml | 44
-rw-r--r--  roles/os_zabbix/vars/template_ops_tools.yml | 23
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 199
-rw-r--r--  roles/os_zabbix/vars/template_router.yml | 27
68 files changed, 2240 insertions, 424 deletions
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
index c110a3b70..b7757214d 100644
--- a/roles/ansible_tower/tasks/main.yaml
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -9,6 +9,7 @@
- ansible
- telnet
- ack
+ - pylint
- name: download Tower setup
get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
@@ -38,5 +39,3 @@
regexp: "^({{ item.option }})( *)="
line: '\1\2= {{ item.value }}'
with_items: config_changes | default([], true)
-
-
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 27bfb7de9..656901409 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -38,6 +38,7 @@
template:
src: etcd.conf.j2
dest: /etc/etcd/etcd.conf
+ backup: true
notify:
- restart etcd
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index 8a266f732..625756867 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -18,6 +18,7 @@
- template:
dest: "{{ etcd_ca_dir }}/fragments/openssl_append.cnf"
src: openssl_append.j2
+ backup: true
- assemble:
src: "{{ etcd_ca_dir }}/fragments"
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index d592dc306..55cd94460 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -39,12 +39,16 @@
owner: 'td-agent'
mode: 0444
-- name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes."
- pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes|int * 5)) }}
+- name: wait for etcd to start up
+ wait_for: port=4001 delay=10
+ when: embedded_etcd | bool
+
+- name: wait for etcd peer to start up
+ wait_for: port=7001 delay=10
+ when: embedded_etcd | bool
- name: ensure td-agent is running
service:
name: 'td-agent'
state: started
enabled: yes
-
diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py
new file mode 100644
index 000000000..d64cebae1
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_action.py
@@ -0,0 +1,538 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix actions
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix action ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError
+
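+# A rough usage sketch (hypothetical playbook values, not part of this commit)
+# showing how a task might drive this module; parameter names follow the
+# argument_spec defined in main() below:
+#
+#   - name: create a trigger action
+#     zbx_action:
+#       zbx_server: "{{ server }}"
+#       zbx_user: "{{ user }}"
+#       zbx_password: "{{ password }}"
+#       name: 'Notify ops on high severity triggers'
+#       status: enabled
+#       conditions_filter:
+#         calculation_type: "and/or"
+#         conditions:
+#           - conditiontype: trigger severity
+#             operator: '>='
+#             value: high
+#       operations:
+#         - operationtype: send message
+#           opmessage:
+#             default_msg: true
+#             mediatypeid: Email
+#           opmessage_grp:
+#             - Ops
+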
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def conditions_equal(zab_conditions, user_conditions):
+ '''Compare two lists of conditions'''
+ c_type = 'conditiontype'
+ _op = 'operator'
+ val = 'value'
+ if len(user_conditions) != len(zab_conditions):
+ return False
+
+ for zab_cond, user_cond in zip(zab_conditions, user_conditions):
+ if zab_cond[c_type] != str(user_cond[c_type]) or zab_cond[_op] != str(user_cond[_op]) or \
+ zab_cond[val] != str(user_cond[val]):
+ return False
+
+ return True
+
+def filter_differences(zabbix_filters, user_filters):
+ '''Determine the differences between the user and zabbix filters'''
+ rval = {}
+ for key, val in user_filters.items():
+
+ if key == 'conditions':
+ if not conditions_equal(zabbix_filters[key], val):
+ rval[key] = val
+
+ elif zabbix_filters[key] != str(val):
+ rval[key] = val
+
+ return rval
+
+# This logic is quite complex. We are comparing two lists of dictionaries.
+# The outer for-loops allow us to descend down into both lists at the same time
+# and then walk over the key,val pairs of the incoming user dict's changes
+# or updates. The if-statements are looking at different sub-object types and
+# comparing them. The other suggestion on how to write this is to write a recursive
+# compare function but for the time constraints and for complexity I decided to go
+# this route.
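+#
+# As a rough illustration (hypothetical data, not taken from this commit): with
+#   zabbix_ops = [{'esc_period': '0', 'operationtype': '0'}]
+#   user_ops   = [{'esc_period': 3600, 'operationtype': 0}]
+# the comparison below flags only the changed value and returns
+#   {'esc_period': 3600}.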
+# pylint: disable=too-many-branches
+def operation_differences(zabbix_ops, user_ops):
+ '''Determine the differences from user and zabbix for operations'''
+
+ # if they don't match, take the user options
+ if len(zabbix_ops) != len(user_ops):
+ return user_ops
+
+ rval = {}
+ for zab, user in zip(zabbix_ops, user_ops):
+ for key, val in user.items():
+ if key == 'opconditions':
+ for z_cond, u_cond in zip(zab[key], user[key]):
+ if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
+ ['conditiontype', 'operator', 'value']]):
+ rval[key] = val
+ break
+ elif key == 'opmessage':
+ # Verify each passed param matches
+ for op_msg_key, op_msg_val in val.items():
+ if zab[key][op_msg_key] != str(op_msg_val):
+ rval[key] = val
+ break
+
+ elif key == 'opmessage_grp':
+ zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]])
+ usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val])
+ if usr_grp_ids != zab_grp_ids:
+ rval[key] = val
+
+ elif key == 'opmessage_usr':
+ zab_usr_ids = set([usr['userid'] for usr in zab[key]])
+ usr_ids = set([usr['userid'] for usr in val])
+ if usr_ids != zab_usr_ids:
+ rval[key] = val
+
+ elif zab[key] != str(val):
+ rval[key] = val
+ return rval
+
+def get_users(zapi, users):
+ '''get user ids from a list of user aliases'''
+ rval_users = []
+
+ for user in users:
+ content = zapi.get_content('user',
+ 'get',
+ {'filter': {'alias': user}})
+ rval_users.append({'userid': content['result'][0]['userid']})
+
+ return rval_users
+
+def get_user_groups(zapi, groups):
+ '''get user group ids from a list of group names'''
+ user_groups = []
+
+ content = zapi.get_content('usergroup',
+ 'get',
+ {'search': {'name': groups}})
+
+ for usr_grp in content['result']:
+ user_groups.append({'usrgrpid': usr_grp['usrgrpid']})
+
+ return user_groups
+
+def get_mediatype_id_by_name(zapi, m_name):
+ '''get the mediatype id from the mediatype name'''
+ content = zapi.get_content('mediatype',
+ 'get',
+ {'filter': {'description': m_name}})
+
+ return content['result'][0]['mediatypeid']
+
+def get_priority(priority):
+ ''' determine priority
+ '''
+ prior = 0
+ if 'info' in priority:
+ prior = 1
+ elif 'warn' in priority:
+ prior = 2
+ elif 'avg' == priority or 'ave' in priority:
+ prior = 3
+ elif 'high' in priority:
+ prior = 4
+ elif 'dis' in priority:
+ prior = 5
+
+ return prior
+
+def get_event_source(from_src):
+ '''Translate event source string into value'''
+ choices = ['trigger', 'discovery', 'auto', 'internal']
+ rval = 0
+ try:
+ rval = choices.index(from_src)
+ except ValueError as _:
+ ZabbixAPIError('Value not found for event source [%s]' % from_src)
+
+ return rval
+
+def get_status(inc_status):
+ '''determine status for action'''
+ rval = 1
+ if inc_status == 'enabled':
+ rval = 0
+
+ return rval
+
+def get_condition_operator(inc_operator):
+ ''' determine the condition operator'''
+ vals = {'=': 0,
+ '<>': 1,
+ 'like': 2,
+ 'not like': 3,
+ 'in': 4,
+ '>=': 5,
+ '<=': 6,
+ 'not in': 7,
+ }
+
+ return vals[inc_operator]
+
+def get_host_id_by_name(zapi, host_name):
+ '''Get host id by name'''
+ content = zapi.get_content('host',
+ 'get',
+ {'filter': {'name': host_name}})
+
+ return content['result'][0]['hostid']
+
+def get_trigger_value(inc_trigger):
+ '''determine the proper trigger value'''
+ rval = 1
+ if inc_trigger == 'PROBLEM':
+ rval = 1
+ else:
+ rval = 0
+
+ return rval
+
+def get_template_id_by_name(zapi, t_name):
+ '''get the template id by name'''
+ content = zapi.get_content('template',
+ 'get',
+ {'filter': {'host': t_name}})
+
+ return content['result'][0]['templateid']
+
+
+def get_host_group_id_by_name(zapi, hg_name):
+ '''Get hostgroup id by name'''
+ content = zapi.get_content('hostgroup',
+ 'get',
+ {'filter': {'name': hg_name}})
+
+ return content['result'][0]['groupid']
+
+def get_condition_type(event_source, inc_condition):
+ '''determine the condition type'''
+ c_types = {}
+ if event_source == 'trigger':
+ c_types = {'host group': 0,
+ 'host': 1,
+ 'trigger': 2,
+ 'trigger name': 3,
+ 'trigger severity': 4,
+ 'trigger value': 5,
+ 'time period': 6,
+ 'host template': 13,
+ 'application': 15,
+ 'maintenance status': 16,
+ }
+
+ elif event_source == 'discovery':
+ c_types = {'host IP': 7,
+ 'discovered service type': 8,
+ 'discovered service port': 9,
+ 'discovery status': 10,
+ 'uptime or downtime duration': 11,
+ 'received value': 12,
+ 'discovery rule': 18,
+ 'discovery check': 19,
+ 'proxy': 20,
+ 'discovery object': 21,
+ }
+
+ elif event_source == 'auto':
+ c_types = {'proxy': 20,
+ 'host name': 22,
+ 'host metadata': 24,
+ }
+
+ elif event_source == 'internal':
+ c_types = {'host group': 0,
+ 'host': 1,
+ 'host template': 13,
+ 'application': 15,
+ 'event type': 23,
+ }
+ else:
+ raise ZabbixAPIError('Unknown event source %s' % event_source)
+
+ return c_types[inc_condition]
+
+def get_operation_type(inc_operation):
+ ''' determine the correct operation type'''
+ o_types = {'send message': 0,
+ 'remote command': 1,
+ 'add host': 2,
+ 'remove host': 3,
+ 'add to host group': 4,
+ 'remove from host group': 5,
+ 'link to template': 6,
+ 'unlink from template': 7,
+ 'enable host': 8,
+ 'disable host': 9,
+ }
+
+ return o_types[inc_operation]
+
+def get_action_operations(zapi, inc_operations):
+ '''Convert the operations into syntax for api'''
+ for operation in inc_operations:
+ operation['operationtype'] = get_operation_type(operation['operationtype'])
+ if operation['operationtype'] == 0: # send message. Need to fix the
+ operation['opmessage']['mediatypeid'] = \
+ get_mediatype_id_by_name(zapi, operation['opmessage']['mediatypeid'])
+ operation['opmessage_grp'] = get_user_groups(zapi, operation.get('opmessage_grp', []))
+ operation['opmessage_usr'] = get_users(zapi, operation.get('opmessage_usr', []))
+ if operation['opmessage']['default_msg']:
+ operation['opmessage']['default_msg'] = 1
+ else:
+ operation['opmessage']['default_msg'] = 0
+
+ # NOT supported for remote commands
+ elif operation['operationtype'] == 1:
+ continue
+
+ # Handle Operation conditions:
+ # Currently there is only 1 available which
+ # is 'event acknowledged'. In the future
+ # if there are any added we will need to pass this
+ # option to a function and return the correct conditiontype
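+ # For illustration (hypothetical user input), an opcondition such as
+ #   {'conditiontype': 'event acknowledged', 'operator': '=', 'value': 'acknowledged'}
+ # is rewritten below into the numeric ids the Zabbix API expects
+ # (conditiontype 14, with the acknowledged value folded into the operator field).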
+ if operation.has_key('opconditions'):
+ for condition in operation['opconditions']:
+ if condition['conditiontype'] == 'event acknowledged':
+ condition['conditiontype'] = 14
+
+ if condition['operator'] == '=':
+ condition['operator'] = 0
+
+ if condition['value'] == 'acknowledged':
+ condition['operator'] = 1
+ else:
+ condition['operator'] = 0
+
+
+ return inc_operations
+
+def get_operation_evaltype(inc_type):
+ '''get the operation evaltype'''
+ rval = 0
+ if inc_type == 'and/or':
+ rval = 0
+ elif inc_type == 'and':
+ rval = 1
+ elif inc_type == 'or':
+ rval = 2
+ elif inc_type == 'custom':
+ rval = 3
+
+ return rval
+
+def get_action_conditions(zapi, event_source, inc_conditions):
+ '''Convert the conditions into syntax for api'''
+
+ calc_type = inc_conditions.pop('calculation_type')
+ inc_conditions['evaltype'] = get_operation_evaltype(calc_type)
+ for cond in inc_conditions['conditions']:
+
+ cond['operator'] = get_condition_operator(cond['operator'])
+ # Based on conditiontype we need to set the proper value
+ # e.g. conditiontype = hostgroup then the value needs to be a hostgroup id
+ # e.g. conditiontype = host the value needs to be a host id
+ cond['conditiontype'] = get_condition_type(event_source, cond['conditiontype'])
+ if cond['conditiontype'] == 0:
+ cond['value'] = get_host_group_id_by_name(zapi, cond['value'])
+ elif cond['conditiontype'] == 1:
+ cond['value'] = get_host_id_by_name(zapi, cond['value'])
+ elif cond['conditiontype'] == 4:
+ cond['value'] = get_priority(cond['value'])
+
+ elif cond['conditiontype'] == 5:
+ cond['value'] = get_trigger_value(cond['value'])
+ elif cond['conditiontype'] == 13:
+ cond['value'] = get_template_id_by_name(zapi, cond['value'])
+ elif cond['conditiontype'] == 16:
+ cond['value'] = ''
+
+ return inc_conditions
+
+
+def get_send_recovery(send_recovery):
+ '''Get the integer value'''
+ rval = 0
+ if send_recovery:
+ rval = 1
+
+ return rval
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_action
+ '''
+
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+
+ name=dict(default=None, type='str'),
+ event_source=dict(default='trigger', choices=['trigger', 'discovery', 'auto', 'internal'], type='str'),
+ action_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
+ action_message=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}\r\n" +
+ "Last value: {ITEM.LASTVALUE}\r\n\r\n{TRIGGER.URL}", type='str'),
+ reply_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
+ reply_message=dict(default="Trigger: {TRIGGER.NAME}\r\nTrigger status: {TRIGGER.STATUS}\r\n" +
+ "Trigger severity: {TRIGGER.SEVERITY}\r\nTrigger URL: {TRIGGER.URL}\r\n\r\n" +
+ "Item values:\r\n\r\n1. {ITEM.NAME1} ({HOST.NAME1}:{ITEM.KEY1}): " +
+ "{ITEM.VALUE1}\r\n2. {ITEM.NAME2} ({HOST.NAME2}:{ITEM.KEY2}): " +
+ "{ITEM.VALUE2}\r\n3. {ITEM.NAME3} ({HOST.NAME3}:{ITEM.KEY3}): " +
+ "{ITEM.VALUE3}", type='str'),
+ send_recovery=dict(default=False, type='bool'),
+ status=dict(default=None, type='str'),
+ escalation_time=dict(default=60, type='int'),
+ conditions_filter=dict(default=None, type='dict'),
+ operations=dict(default=None, type='list'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'action'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'name': module.params['name']},
+ 'selectFilter': 'extend',
+ 'selectOperations': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter'])
+ operations = get_action_operations(zapi, module.params['operations'])
+ params = {'name': module.params['name'],
+ 'esc_period': module.params['escalation_time'],
+ 'eventsource': get_event_source(module.params['event_source']),
+ 'status': get_status(module.params['status']),
+ 'def_shortdata': module.params['action_subject'],
+ 'def_longdata': module.params['action_message'],
+ 'r_shortdata': module.params['reply_subject'],
+ 'r_longdata': module.params['reply_message'],
+ 'recovery_msg': get_send_recovery(module.params['send_recovery']),
+ 'filter': conditions,
+ 'operations': operations,
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ _ = params.pop('hostid', None)
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'operations':
+ ops = operation_differences(zab_results[key], value)
+ if ops:
+ differences[key] = ops
+
+ elif key == 'filter':
+ filters = filter_differences(zab_results[key], value)
+ if filters:
+ differences[key] = filters
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update.
+ # action update requires an id, filters, and operations
+ differences['actionid'] = zab_results['actionid']
+ differences['operations'] = params['operations']
+ differences['filter'] = params['filter']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required.
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_application.py b/roles/lib_zabbix/library/zbx_application.py
index d3d08c9dd..21e3d91f4 100644
--- a/roles/lib_zabbix/library/zbx_application.py
+++ b/roles/lib_zabbix/library/zbx_application.py
@@ -41,16 +41,17 @@ def exists(content, key='result'):
return True
-def get_template_ids(zapi, template_names):
+def get_template_ids(zapi, template_name):
'''
get related templates
'''
template_ids = []
# Fetch templates by name
- for template_name in template_names:
- content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
- if content.has_key('result'):
- template_ids.append(content['result'][0]['templateid'])
+ content = zapi.get_content('template',
+ 'get',
+ {'search': {'host': template_name}})
+ if content.has_key('result'):
+ template_ids.append(content['result'][0]['templateid'])
return template_ids
def main():
@@ -63,8 +64,8 @@ def main():
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- template_name=dict(default=None, type='list'),
+ name=dict(default=None, type='str', required=True),
+ template_name=dict(default=None, type='str'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
@@ -81,10 +82,11 @@ def main():
aname = module.params['name']
state = module.params['state']
# get a applicationid, see if it exists
+ tids = get_template_ids(zapi, module.params['template_name'])
content = zapi.get_content(zbx_class_name,
'get',
{'search': {'name': aname},
- 'selectHost': 'hostid',
+ 'templateids': tids[0],
})
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
@@ -97,9 +99,10 @@ def main():
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
- params = {'hostid': get_template_ids(zapi, module.params['template_name'])[0],
+ params = {'hostid': tids[0],
'name': aname,
}
+
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
diff --git a/roles/lib_zabbix/library/zbx_discoveryrule.py b/roles/lib_zabbix/library/zbx_discoveryrule.py
index 71a0580c2..f52f350a5 100644
--- a/roles/lib_zabbix/library/zbx_discoveryrule.py
+++ b/roles/lib_zabbix/library/zbx_discoveryrule.py
@@ -85,6 +85,7 @@ def main():
Ansible module for zabbix discovery rules
'''
+
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
@@ -93,6 +94,7 @@ def main():
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
interfaceid=dict(default=None, type='int'),
ztype=dict(default='trapper', type='str'),
delay=dict(default=60, type='int'),
@@ -113,18 +115,27 @@ def main():
idname = "itemid"
dname = module.params['name']
state = module.params['state']
+ template = get_template(zapi, module.params['template_name'])
# selectInterfaces doesn't appear to be working but is needed.
content = zapi.get_content(zbx_class_name,
'get',
{'search': {'name': dname},
+ 'templateids': template['templateid'],
#'selectDServices': 'extend',
#'selectDChecks': 'extend',
#'selectDhosts': 'dhostid',
})
+
+ #******#
+ # GET
+ #******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
+ #******#
+ # DELETE
+ #******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
@@ -132,24 +143,37 @@ def main():
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
if state == 'present':
- template = get_template(zapi, module.params['template_name'])
params = {'name': dname,
'key_': module.params['key'],
'hostid': template['templateid'],
'interfaceid': module.params['interfaceid'],
'lifetime': module.params['lifetime'],
'type': get_type(module.params['ztype']),
+ 'description': module.params['description'],
}
if params['type'] in [2, 5, 7, 11]:
params.pop('interfaceid')
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
if not exists(content):
- # if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
+
+ ########
+ # UPDATE
+ ########
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
@@ -163,6 +187,10 @@ def main():
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py
index bcd389e47..6faa82dfc 100644
--- a/roles/lib_zabbix/library/zbx_item.py
+++ b/roles/lib_zabbix/library/zbx_item.py
@@ -53,6 +53,8 @@ def get_value_type(value_type):
vtype = 0
if 'int' in value_type:
vtype = 3
+ elif 'log' in value_type:
+ vtype = 2
elif 'char' in value_type:
vtype = 1
elif 'str' in value_type:
@@ -60,18 +62,53 @@ def get_value_type(value_type):
return vtype
-def get_app_ids(zapi, application_names):
+def get_app_ids(application_names, app_name_ids):
''' get application ids from names
'''
- if isinstance(application_names, str):
- application_names = [application_names]
- app_ids = []
- for app_name in application_names:
- content = zapi.get_content('application', 'get', {'search': {'name': app_name}})
- if content.has_key('result'):
- app_ids.append(content['result'][0]['applicationid'])
- return app_ids
+ applications = []
+ if application_names:
+ for app in application_names:
+ applications.append(app_name_ids[app])
+ return applications
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ template_ids = []
+ app_ids = {}
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'search': {'host': template_name},
+ 'selectApplications': ['applicationid', 'name']})
+ if content.has_key('result'):
+ template_ids.append(content['result'][0]['templateid'])
+ for app in content['result'][0]['applications']:
+ app_ids[app['name']] = app['applicationid']
+
+ return template_ids, app_ids
+
+def get_multiplier(inval):
+ ''' Determine the multiplier
+ '''
+ if inval == None or inval == '':
+ return None, 0
+
+ rval = None
+ try:
+ rval = int(inval)
+ except ValueError:
+ pass
+
+ if rval:
+ return rval, 1
+
+ return rval, 0
+
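+# For illustration (hypothetical calls): get_multiplier('1024') returns (1024, 1),
+# signalling that a formula should be applied, while get_multiplier('') and
+# get_multiplier('not-a-number') both return (None, 0).
+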
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
def main():
'''
ansible zabbix module for zbx_item
@@ -88,7 +125,10 @@ def main():
template_name=dict(default=None, type='str'),
zabbix_type=dict(default=2, type='int'),
value_type=dict(default='int', type='str'),
- applications=dict(default=[], type='list'),
+ multiplier=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
+ units=dict(default=None, type='str'),
+ applications=dict(default=None, type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
@@ -101,59 +141,83 @@ def main():
#Set the instance and the template for the rest of the calls
zbx_class_name = 'item'
- idname = "itemid"
state = module.params['state']
- key = module.params['key']
- template_name = module.params['template_name']
-
- content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
- templateid = None
- if content['result']:
- templateid = content['result'][0]['templateid']
- else:
- module.exit_json(changed=False,
- results='Error: Could find template with name %s for item.' % template_name,
+
+ templateid, app_name_ids = get_template_id(zapi, module.params['template_name'])
+
+ # Fail if a template was not found matching the name
+ if not templateid:
+ module.exit_json(failed=True,
+ changed=False,
+ results='Error: Could not find template with name %s for item.' % module.params['template_name'],
state="Unkown")
content = zapi.get_content(zbx_class_name,
'get',
- {'search': {'key_': key},
+ {'search': {'key_': module.params['key']},
'selectApplications': 'applicationid',
+ 'templateids': templateid,
})
+ #******#
+ # GET
+ #******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
+ #******#
+ # DELETE
+ #******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
module.exit_json(changed=True, results=content['result'], state="absent")
+ # Create and Update
if state == 'present':
+
+ formula, use_multiplier = get_multiplier(module.params['multiplier'])
params = {'name': module.params.get('name', module.params['key']),
- 'key_': key,
- 'hostid': templateid,
+ 'key_': module.params['key'],
+ 'hostid': templateid[0],
'type': module.params['zabbix_type'],
'value_type': get_value_type(module.params['value_type']),
- 'applications': get_app_ids(zapi, module.params['applications']),
+ 'applications': get_app_ids(module.params['applications'], app_name_ids),
+ 'formula': formula,
+ 'multiplier': use_multiplier,
+ 'description': module.params['description'],
+ 'units': module.params['units'],
}
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
if not exists(content):
- # if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
+
+
+ ########
+ # UPDATE
+ ########
+ _ = params.pop('hostid', None)
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if key == 'applications':
- zab_apps = set([item['applicationid'] for item in zab_results[key]])
- if zab_apps != set(value):
- differences[key] = zab_apps
+ app_ids = [item['applicationid'] for item in zab_results[key]]
+ if set(app_ids) != set(value):
+ differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
@@ -162,8 +226,12 @@ def main():
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
- differences[idname] = zab_results[idname]
+ differences['itemid'] = zab_results['itemid']
content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index 24f85710d..e7fd6fa21 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -38,13 +38,14 @@ def exists(content, key='result'):
return True
-def get_rule_id(zapi, discoveryrule_name):
+def get_rule_id(zapi, discoveryrule_key, templateid):
'''get a discoveryrule by name
'''
content = zapi.get_content('discoveryrule',
'get',
- {'search': {'name': discoveryrule_name},
+ {'search': {'key_': discoveryrule_key},
'output': 'extend',
+ 'templateids': templateid,
})
if not content['result']:
return None
@@ -53,6 +54,9 @@ def get_rule_id(zapi, discoveryrule_name):
def get_template(zapi, template_name):
'''get a template by name
'''
+ if not template_name:
+ return None
+
content = zapi.get_content('template',
'get',
{'search': {'host': template_name},
@@ -124,16 +128,17 @@ def get_status(status):
return _status
-def get_app_ids(zapi, application_names):
+def get_app_ids(zapi, application_names, templateid):
''' get application ids from names
'''
app_ids = []
for app_name in application_names:
- content = zapi.get_content('application', 'get', {'search': {'name': app_name}})
+ content = zapi.get_content('application', 'get', {'filter': {'name': app_name}, 'templateids': templateid})
if content.has_key('result'):
app_ids.append(content['result'][0]['applicationid'])
return app_ids
+# pylint: disable=too-many-branches
def main():
'''
Ansible module for zabbix discovery rules
@@ -147,16 +152,17 @@ def main():
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
interfaceid=dict(default=None, type='int'),
ztype=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
- template_name=dict(default=[], type='list'),
state=dict(default='present', type='str'),
status=dict(default='enabled', type='str'),
- discoveryrule_name=dict(default=None, type='str'),
applications=dict(default=[], type='list'),
+ template_name=dict(default=None, type='str'),
+ discoveryrule_key=dict(default=None, type='str'),
),
#supports_check_mode=True
)
@@ -169,20 +175,27 @@ def main():
#Set the instance and the template for the rest of the calls
zbx_class_name = 'itemprototype'
idname = "itemid"
- dname = module.params['name']
state = module.params['state']
+ template = get_template(zapi, module.params['template_name'])
# selectInterfaces doesn't appear to be working but is needed.
content = zapi.get_content(zbx_class_name,
'get',
- {'search': {'name': dname},
+ {'search': {'key_': module.params['key']},
'selectApplications': 'applicationid',
'selectDiscoveryRule': 'itemid',
- #'selectDhosts': 'dhostid',
+ 'templated': True,
})
+
+ #******#
+ # GET
+ #******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
+ #******#
+ # DELETE
+ #******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
@@ -190,26 +203,39 @@ def main():
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
+ # Create and Update
if state == 'present':
- template = get_template(zapi, module.params['template_name'])
- params = {'name': dname,
+ params = {'name': module.params['name'],
'key_': module.params['key'],
'hostid': template['templateid'],
'interfaceid': module.params['interfaceid'],
- 'ruleid': get_rule_id(zapi, module.params['discoveryrule_name']),
+ 'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
'type': get_type(module.params['ztype']),
'value_type': get_value_type(module.params['value_type']),
- 'applications': get_app_ids(zapi, module.params['applications']),
+ 'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
+ 'description': module.params['description'],
}
+
if params['type'] in [2, 5, 7, 8, 11, 15]:
params.pop('interfaceid')
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
if not exists(content):
- # if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
+
+ #******#
+ # UPDATE
+ #******#
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
@@ -218,6 +244,11 @@ def main():
if value != zab_results['discoveryRule']['itemid']:
differences[key] = value
+ elif key == 'applications':
+ app_ids = [app['applicationid'] for app in zab_results[key]]
+ if set(app_ids) - set(value):
+ differences[key] = value
+
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
@@ -227,6 +258,10 @@ def main():
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py
index 43f3d2677..ab7731faa 100644
--- a/roles/lib_zabbix/library/zbx_trigger.py
+++ b/roles/lib_zabbix/library/zbx_trigger.py
@@ -65,7 +65,7 @@ def get_deps(zapi, deps):
for desc in deps:
content = zapi.get_content('trigger',
'get',
- {'search': {'description': desc},
+ {'filter': {'description': desc},
'expandExpression': True,
'selectDependencies': 'triggerid',
})
@@ -74,6 +74,36 @@ def get_deps(zapi, deps):
return results
+
+def get_trigger_status(inc_status):
+ ''' Determine the trigger's status
+ 0 is enabled
+ 1 is disabled
+ '''
+ r_status = 0
+ if inc_status == 'disabled':
+ r_status = 1
+
+ return r_status
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ template_ids = []
+ app_ids = {}
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'search': {'host': template_name},
+ 'selectApplications': ['applicationid', 'name']})
+ if content.has_key('result'):
+ template_ids.append(content['result'][0]['templateid'])
+ for app in content['result'][0]['applications']:
+ app_ids[app['name']] = app['applicationid']
+
+ return template_ids, app_ids
+
def main():
'''
Create a trigger in zabbix
@@ -98,10 +128,14 @@ def main():
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
expression=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
description=dict(default=None, type='str'),
dependencies=dict(default=[], type='list'),
priority=dict(default='avg', type='str'),
+ url=dict(default=None, type='str'),
+ status=dict(default=None, type='str'),
state=dict(default='present', type='str'),
+ template_name=dict(default=None, type='str'),
),
#supports_check_mode=True
)
@@ -115,36 +149,60 @@ def main():
zbx_class_name = 'trigger'
idname = "triggerid"
state = module.params['state']
- description = module.params['description']
+ tname = module.params['name']
+
+ templateid = None
+ if module.params['template_name']:
+ templateid, _ = get_template_id(zapi, module.params['template_name'])
content = zapi.get_content(zbx_class_name,
'get',
- {'search': {'description': description},
+ {'filter': {'description': tname},
'expandExpression': True,
'selectDependencies': 'triggerid',
+ 'templateids': templateid,
})
+
+ # Get
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
+ # Delete
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
+ # Create and Update
if state == 'present':
- params = {'description': description,
+ params = {'description': tname,
+ 'comments': module.params['description'],
'expression': module.params['expression'],
'dependencies': get_deps(zapi, module.params['dependencies']),
'priority': get_priority(module.params['priority']),
+ 'url': module.params['url'],
+ 'status': get_trigger_status(module.params['status']),
}
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
+
+ ########
+ # UPDATE
+ ########
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
diff --git a/roles/lib_zabbix/library/zbx_triggerprototype.py b/roles/lib_zabbix/library/zbx_triggerprototype.py
new file mode 100644
index 000000000..c1224b268
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_triggerprototype.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+'''
+ansible module for zabbix triggerprototypes
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix triggerprototypes ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_priority(priority):
+ ''' determine priority
+ '''
+ prior = 0
+ if 'info' in priority:
+ prior = 1
+ elif 'warn' in priority:
+ prior = 2
+ elif 'avg' == priority or 'ave' in priority:
+ prior = 3
+ elif 'high' in priority:
+ prior = 4
+ elif 'dis' in priority:
+ prior = 5
+
+ return prior
+
+def get_trigger_status(inc_status):
+ ''' Determine the trigger's status
+ 0 is enabled
+ 1 is disabled
+ '''
+ r_status = 0
+ if inc_status == 'disabled':
+ r_status = 1
+
+ return r_status
+
+
+def main():
+ '''
+ Create a triggerprototype in zabbix
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ expression=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
+ priority=dict(default='avg', type='str'),
+ url=dict(default=None, type='str'),
+ status=dict(default=None, type='str'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'triggerprototype'
+ idname = "triggerid"
+ state = module.params['state']
+ tname = module.params['name']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'description': tname},
+ 'expandExpression': True,
+ 'selectDependencies': 'triggerid',
+ })
+
+ # Get
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ # Delete
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+ params = {'description': tname,
+ 'comments': module.params['description'],
+ 'expression': module.params['expression'],
+ 'priority': get_priority(module.params['priority']),
+ 'url': module.params['url'],
+ 'status': get_trigger_status(module.params['status']),
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+ ########
+ # UPDATE
+ ########
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required.
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_user.py b/roles/lib_zabbix/library/zbx_user.py
index c916fa96a..62c85c1bf 100644
--- a/roles/lib_zabbix/library/zbx_user.py
+++ b/roles/lib_zabbix/library/zbx_user.py
@@ -164,7 +164,7 @@ def main():
if key == 'usrgrps':
# this must be done as a list of ordered dictionaries fails comparison
- if not all([True for _ in zab_results[key][0] if _ in value[0]]):
+ if not all([_ in value for _ in zab_results[key]]):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
diff --git a/roles/lib_zabbix/library/zbx_user_media.py b/roles/lib_zabbix/library/zbx_user_media.py
index 3f7760475..8895c78c3 100644
--- a/roles/lib_zabbix/library/zbx_user_media.py
+++ b/roles/lib_zabbix/library/zbx_user_media.py
@@ -54,8 +54,8 @@ def get_mtype(zapi, mtype):
except ValueError:
pass
- content = zapi.get_content('mediatype', 'get', {'search': {'description': mtype}})
- if content.has_key['result'] and content['result']:
+ content = zapi.get_content('mediatype', 'get', {'filter': {'description': mtype}})
+ if content.has_key('result') and content['result']:
return content['result'][0]['mediatypeid']
return None
@@ -63,7 +63,7 @@ def get_mtype(zapi, mtype):
def get_user(zapi, user):
''' Get userids from user aliases
'''
- content = zapi.get_content('user', 'get', {'search': {'alias': user}})
+ content = zapi.get_content('user', 'get', {'filter': {'alias': user}})
if content['result']:
return content['result'][0]
@@ -104,15 +104,17 @@ def find_media(medias, user_media):
''' Find the user media in the list of medias
'''
for media in medias:
- if all([media[key] == user_media[key] for key in user_media.keys()]):
+ if all([media[key] == str(user_media[key]) for key in user_media.keys()]):
return media
return None
-def get_active(in_active):
+def get_active(is_active):
'''Determine active value
+ 0 - enabled
+ 1 - disabled
'''
active = 1
- if in_active:
+ if is_active:
active = 0
return active
@@ -128,6 +130,21 @@ def get_mediatype(zapi, mediatype, mediatype_desc):
return mtypeid
+def preprocess_medias(zapi, medias):
+ ''' Insert the correct information when processing medias '''
+ for media in medias:
+ # Fetch the mediatypeid from the media desc (name)
+ if media.has_key('mediatype'):
+ media['mediatypeid'] = get_mediatype(zapi, mediatype=None, mediatype_desc=media.pop('mediatype'))
+
+ media['active'] = get_active(media.get('active'))
+ media['severity'] = int(get_severity(media['severity']))
+
+ return medias
+
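+# A sketch of that transformation (hypothetical input): a media entry such as
+#   {'mediatype': 'Email', 'sendto': 'ops@example.com', 'active': True,
+#    'severity': 'high', 'period': '1-7,00:00-24:00'}
+# comes back with 'mediatype' replaced by the looked-up 'mediatypeid', 'active'
+# translated to 0 (enabled), and 'severity' converted to its numeric value.
+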
+# Disabling branching as the logic requires branches.
+# I've also added a few safeguards which required more branches.
+# pylint: disable=too-many-branches
def main():
'''
Ansible zabbix module for mediatype
@@ -166,11 +183,17 @@ def main():
# User media is fetched through the usermedia.get
zbx_user_query = get_zbx_user_query_data(zapi, module.params['login'])
- content = zapi.get_content('usermedia', 'get', zbx_user_query)
-
+ content = zapi.get_content('usermedia', 'get',
+ {'userids': [uid for user, uid in zbx_user_query.items()]})
+ #####
+ # Get
+ #####
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
+ ########
+ # Delete
+ ########
if state == 'absent':
if not exists(content) or len(content['result']) == 0:
module.exit_json(changed=False, state="absent")
@@ -178,13 +201,14 @@ def main():
if not module.params['login']:
module.exit_json(failed=True, changed=False, results='Must specifiy a user login.', state="absent")
- content = zapi.get_content(zbx_class_name, 'deletemedia', [content['result'][0][idname]])
+ content = zapi.get_content(zbx_class_name, 'deletemedia', [res[idname] for res in content['result']])
if content.has_key('error'):
module.exit_json(changed=False, results=content['error'], state="absent")
module.exit_json(changed=True, results=content['result'], state="absent")
+ # Create and Update
if state == 'present':
active = get_active(module.params['active'])
mtypeid = get_mediatype(zapi, module.params['mediatype'], module.params['mediatype_desc'])
@@ -197,13 +221,21 @@ def main():
'severity': int(get_severity(module.params['severity'])),
'period': module.params['period'],
}]
+ else:
+ medias = preprocess_medias(zapi, medias)
params = {'users': [zbx_user_query],
'medias': medias,
'output': 'extend',
}
+ ########
+ # Create
+ ########
if not exists(content):
+ if not params['medias']:
+ module.exit_json(changed=False, results=content['result'], state='present')
+
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'addmedia', params)
@@ -216,6 +248,9 @@ def main():
# If user params exists, check to see if they already exist in zabbix
# if they exist, then return as no update
# elif they do not exist, then take user params only
+ ########
+ # Update
+ ########
diff = {'medias': [], 'users': {}}
_ = [diff['medias'].append(media) for media in params['medias'] if not find_media(content['result'], media)]
@@ -225,6 +260,9 @@ def main():
for user in params['users']:
diff['users']['userid'] = user['userid']
+ # Medias have no real unique key, so we need to make them match the incoming user's request
+ diff['medias'] = medias
+
# We have differences and need to update
content = zapi.get_content(zbx_class_name, 'updatemedia', diff)
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 022ca52f2..41381e76c 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -1,6 +1,4 @@
---
-- debug: var=template
-
- name: Template Create Template
zbx_template:
zbx_server: "{{ server }}"
@@ -9,12 +7,10 @@
name: "{{ template.name }}"
register: created_template
-- debug: var=created_template
- set_fact:
- lzbx_applications: "{{ template.zitems | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
-
-- debug: var=lzbx_applications
+ lzbx_item_applications: "{{ template.zitems | default([], True) | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
+ lzbx_itemprototype_applications: "{{ template.zitemprototypes | default([], True) | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
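+# Illustration only (hypothetical template data): with
+#   zitems:
+#     - key: kernel.all.cpu.wait.total
+#       applications: ['Kernel Metrics']
+# the filters above reduce to the unique application list ['Kernel Metrics'],
+# which the Create Application task below iterates over.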
- name: Create Application
zbx_application:
@@ -23,11 +19,11 @@
zbx_password: "{{ password }}"
name: "{{ item }}"
template_name: "{{ template.name }}"
- with_items: lzbx_applications
+ with_items:
+ - "{{ lzbx_item_applications }}"
+ - "{{ lzbx_itemprototype_applications }}"
register: created_application
- when: template.zitems is defined
-
-- debug: var=created_application
+ when: template.zitems is defined or template.zitemprototypes is defined
- name: Create Items
zbx_item:
@@ -37,25 +33,66 @@
key: "{{ item.key }}"
name: "{{ item.name | default(item.key, true) }}"
value_type: "{{ item.value_type | default('int') }}"
+ description: "{{ item.description | default('', True) }}"
+ multiplier: "{{ item.multiplier | default('', True) }}"
+ units: "{{ item.units | default('', True) }}"
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
with_items: template.zitems
register: created_items
when: template.zitems is defined
-#- debug: var=ctp_created_items
-
- name: Create Triggers
zbx_trigger:
zbx_server: "{{ server }}"
zbx_user: "{{ user }}"
zbx_password: "{{ password }}"
- description: "{{ item.description }}"
+ name: "{{ item.name }}"
+ description: "{{ item.description | default('', True) }}"
+ dependencies: "{{ item.dependencies | default([], True) }}"
expression: "{{ item.expression }}"
priority: "{{ item.priority }}"
+ url: "{{ item.url | default(None, True) }}"
with_items: template.ztriggers
when: template.ztriggers is defined
-#- debug: var=ctp_created_triggers
+- name: Create Discoveryrules
+ zbx_discoveryrule:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ key: "{{ item.key }}"
+ lifetime: "{{ item.lifetime }}"
+ template_name: "{{ template.name }}"
+ description: "{{ item.description | default('', True) }}"
+ with_items: template.zdiscoveryrules
+ when: template.zdiscoveryrules is defined
+- name: Create Item Prototypes
+ zbx_itemprototype:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ key: "{{ item.key }}"
+ discoveryrule_key: "{{ item.discoveryrule_key }}"
+ value_type: "{{ item.value_type }}"
+ template_name: "{{ template.name }}"
+ applications: "{{ item.applications }}"
+ description: "{{ item.description | default('', True) }}"
+ with_items: template.zitemprototypes
+ when: template.zitemprototypes is defined
+- name: Create Trigger Prototypes
+ zbx_triggerprototype:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ expression: "{{ item.expression }}"
+ url: "{{ item.url | default('', True) }}"
+ priority: "{{ item.priority | default('average', True) }}"
+ description: "{{ item.description | default('', True) }}"
+ with_items: template.ztriggerprototypes
+ when: template.ztriggerprototypes is defined
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index d9f2dafab..73bd28630 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Set common OpenShift facts
+- name: Set common Cluster facts
openshift_facts:
role: common
local_facts:
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 8e7d71154..50816d319 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,5 +5,3 @@
# chains with the public zone (or the zone associated with the correct
# interfaces)
os_firewall_use_firewalld: False
-
-openshift_data_dir: /var/lib/openshift
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index 3246790aa..7d4f100e3 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -14,3 +14,5 @@ db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
quickstarts_base: "{{ examples_base }}/quickstart-templates"
+
+openshift_examples_import_command: "create"
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
index 03affbddf..f213d99ca 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
@@ -161,19 +161,19 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/wildfly-8-centos",
+ "dockerImageRepository": "openshift/wildfly-81-centos7",
"tags": [
{
"name": "latest"
},
{
- "name": "8",
+ "name": "8.1",
"annotations": {
- "description": "Build and run Java applications on Wildfly 8",
+ "description": "Build and run Java applications on Wildfly 8.1",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
- "supports":"wildfly:8,jee,java",
- "version": "8"
+ "supports":"wildfly:8.1,jee,java",
+ "version": "8.1"
},
"from": {
"Kind": "ImageStreamTag",
@@ -260,13 +260,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "openshift/jenkins-16-centos7",
+ "dockerImageRepository": "openshift/jenkins-1-centos7",
"tags": [
{
"name": "latest"
},
{
- "name": "1.6",
+ "name": "1",
"from": {
"Kind": "ImageStreamTag",
"Name": "latest"
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
index 0bd885af3..8c125f76a 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
@@ -230,13 +230,13 @@
"creationTimestamp": null
},
"spec": {
- "dockerImageRepository": "registry.access.redhat.com/openshift3/jenkins-16-rhel7",
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/jenkins-1-rhel7",
"tags": [
{
"name": "latest"
},
{
- "name": "1.6",
+ "name": "1",
"from": {
"Kind": "ImageStreamTag",
"Name": "latest"
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json
index da08ffbd5..14bd032af 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json
@@ -88,7 +88,7 @@
"containers": [
{
"name": "jenkins",
- "image": "openshift/jenkins-16-centos7",
+ "image": "${JENKINS_IMAGE}",
"env": [
{
"name": "JENKINS_PASSWORD",
@@ -133,6 +133,11 @@
"value": "jenkins"
},
{
+ "name": "JENKINS_IMAGE",
+ "description": "Jenkins Docker image to use",
+ "value": "openshift/jenkins-1-centos7"
+ },
+ {
"name": "JENKINS_PASSWORD",
"description": "Password for the Jenkins user",
"generate": "expression",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json
index 33df68c74..fa31de486 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json
@@ -105,7 +105,7 @@
"containers": [
{
"name": "jenkins",
- "image": "openshift/jenkins-16-centos7",
+ "image": "${JENKINS_IMAGE}",
"env": [
{
"name": "JENKINS_PASSWORD",
@@ -156,6 +156,11 @@
"value": "password"
},
{
+ "name": "JENKINS_IMAGE",
+ "description": "Jenkins Docker image to use",
+ "value": "openshift/jenkins-1-centos7"
+ },
+ {
"name": "VOLUME_CAPACITY",
"description": "Volume space available for data, e.g. 512Mi, 2Gi",
"value": "512Mi",
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
index 0497e6824..5df36ccc2 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
@@ -6,10 +6,10 @@
"iconClass" : "icon-jboss",
"description": "Application template for EAP 6 applications built using STI."
},
- "name": "eap6-basic-sti"
+ "name": "eap6-https-sti"
},
"labels": {
- "template": "eap6-basic-sti"
+ "template": "eap6-https-sti"
},
"parameters": [
{
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index bfc6dfb0a..3a829a4c6 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -7,7 +7,7 @@
# RHEL and Centos image streams are mutually exclusive
- name: Import RHEL streams
command: >
- {{ openshift.common.client_binary }} create -n openshift -f {{ rhel_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ rhel_image_streams }}
when: openshift_examples_load_rhel
register: oex_import_rhel_streams
failed_when: "'already exists' not in oex_import_rhel_streams.stderr and oex_import_rhel_streams.rc != 0"
@@ -15,7 +15,7 @@
- name: Import Centos Image streams
command: >
- {{ openshift.common.client_binary }} create -n openshift -f {{ centos_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ centos_image_streams }}
when: openshift_examples_load_centos | bool
register: oex_import_centos_streams
failed_when: "'already exists' not in oex_import_centos_streams.stderr and oex_import_centos_streams.rc != 0"
@@ -23,7 +23,7 @@
- name: Import db templates
command: >
- {{ openshift.common.client_binary }} create -n openshift -f {{ db_templates_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ db_templates_base }}
when: openshift_examples_load_db_templates | bool
register: oex_import_db_templates
failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -31,7 +31,7 @@
- name: Import quickstart-templates
command: >
- {{ openshift.common.client_binary }} create -n openshift -f {{ quickstarts_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }}
when: openshift_examples_load_quickstarts
register: oex_import_quickstarts
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -40,7 +40,7 @@
- name: Import xPaas image streams
command: >
- {{ openshift.common.client_binary }} create -n openshift -f {{ xpaas_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_image_streams }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_streams
failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -48,7 +48,7 @@
- name: Import xPaas templates
command: >
- {{ openshift.common.client_binary }} create -n openshift -f {{ xpaas_templates_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_templates_base }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_templates
failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index c13e96c2b..69bb49c9b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -6,7 +6,7 @@
DOCUMENTATION = '''
---
module: openshift_facts
-short_description: OpenShift Facts
+short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
@@ -16,6 +16,7 @@ EXAMPLES = '''
import ConfigParser
import copy
import os
+from distutils.util import strtobool
def hostname_valid(hostname):
@@ -283,28 +284,6 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
-def set_registry_url_if_unset(facts):
- """ Set registry_url fact if not already present in facts dict
-
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated identity providers
- facts if they were not already present
- """
- for role in ('master', 'node'):
- if role in facts:
- deployment_type = facts['common']['deployment_type']
- if 'registry_url' not in facts[role]:
- registry_url = "openshift/origin-${component}:${version}"
- if deployment_type == 'enterprise':
- registry_url = "openshift3/ose-${component}:${version}"
- elif deployment_type == 'online':
- registry_url = ("openshift3/ose-${component}:${version}")
- facts[role]['registry_url'] = registry_url
-
- return facts
-
def set_fluentd_facts_if_unset(facts):
""" Set fluentd facts if not already present in facts dict
dict: the facts dict updated with the generated fluentd facts if
@@ -317,12 +296,28 @@ def set_fluentd_facts_if_unset(facts):
"""
if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
if 'use_fluentd' not in facts['common']:
- use_fluentd = True if deployment_type == 'online' else False
+ use_fluentd = False
facts['common']['use_fluentd'] = use_fluentd
return facts
+def set_node_schedulability(facts):
+ """ Set schedulable facts if not already present in facts dict
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated schedulable
+ facts if they were not already present
+
+ """
+ if 'node' in facts:
+ if 'schedulable' not in facts['node']:
+ if 'master' in facts:
+ facts['node']['schedulable'] = False
+ else:
+ facts['node']['schedulable'] = True
+ return facts
+
def set_metrics_facts_if_unset(facts):
""" Set cluster metrics facts if not already present in facts dict
dict: the facts dict updated with the generated cluster metrics facts if
@@ -447,6 +442,54 @@ def set_aggregate_facts(facts):
return facts
+def set_deployment_facts_if_unset(facts):
+ """ Set Facts that vary based on deployment_type. This currently
+ includes common.service_type, common.config_base, master.registry_url,
+ node.registry_url
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated deployment_type
+ facts
+ """
+ # Perhaps re-factor this as a map?
+ # pylint: disable=too-many-branches
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'service_type' not in facts['common']:
+ service_type = 'atomic-openshift'
+ if deployment_type == 'origin':
+ service_type = 'origin'
+ elif deployment_type in ['enterprise', 'online']:
+ service_type = 'openshift'
+ facts['common']['service_type'] = service_type
+ if 'config_base' not in facts['common']:
+ config_base = '/etc/origin'
+ if deployment_type in ['enterprise', 'online']:
+ config_base = '/etc/openshift'
+ facts['common']['config_base'] = config_base
+ if 'data_dir' not in facts['common']:
+ data_dir = '/var/lib/origin'
+ if deployment_type in ['enterprise', 'online']:
+ data_dir = '/var/lib/openshift'
+ facts['common']['data_dir'] = data_dir
+ facts['common']['version'] = get_openshift_version()
+
+ for role in ('master', 'node'):
+ if role in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'registry_url' not in facts[role]:
+ registry_url = 'openshift/origin-${component}:${version}'
+ if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ registry_url = 'openshift3/ose-${component}:${version}'
+ elif deployment_type == 'atomic-enterprise':
+ registry_url = 'aep3/aep-${component}:${version}'
+ facts[role]['registry_url'] = registry_url
+
+ return facts
+
+
def set_sdn_facts_if_unset(facts):
""" Set sdn facts if not already present in facts dict
@@ -457,8 +500,10 @@ def set_sdn_facts_if_unset(facts):
were not already present
"""
if 'common' in facts:
+ use_sdn = facts['common']['use_openshift_sdn']
+ if not (use_sdn == '' or isinstance(use_sdn, bool)):
+ facts['common']['use_openshift_sdn'] = bool(strtobool(str(use_sdn)))
if 'sdn_network_plugin_name' not in facts['common']:
- use_sdn = facts['common']['use_openshift_sdn']
plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
facts['common']['sdn_network_plugin_name'] = plugin
@@ -468,6 +513,10 @@ def set_sdn_facts_if_unset(facts):
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
+ if 'node' in facts:
+ if 'sdn_mtu' not in facts['node']:
+ facts['node']['sdn_mtu'] = '1450'
+
return facts
def format_url(use_ssl, hostname, port, path=''):
@@ -509,7 +558,7 @@ def get_current_config(facts):
# anything from working properly as far as I can tell, perhaps because
# we override the kubeconfig path everywhere we use it?
# Query kubeconfig settings
- kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+ kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
if role == 'node':
kubeconfig_dir = os.path.join(
kubeconfig_dir, "node-%s" % facts['common']['hostname']
@@ -550,6 +599,21 @@ def get_current_config(facts):
return current_config
+def get_openshift_version():
+ """ Get current version of openshift on the host
+
+ Returns:
+ version: the current openshift version
+ """
+ version = ''
+
+ if os.path.isfile('/usr/bin/openshift'):
+ _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+ versions = dict(e.split(' v') for e in output.splitlines())
+ version = versions.get('openshift', '')
+
+    # TODO: acknowledge the possibility of a containerized install
+ return version
def apply_provider_facts(facts, provider_facts):
""" Apply provider facts to supplied facts dict
@@ -595,7 +659,7 @@ def merge_facts(orig, new):
facts = dict()
for key, value in orig.iteritems():
if key in new:
- if isinstance(value, dict):
+ if isinstance(value, dict) and isinstance(new[key], dict):
facts[key] = merge_facts(value, new[key])
else:
facts[key] = copy.copy(new[key])
@@ -656,25 +720,25 @@ def get_local_facts_from_file(filename):
class OpenShiftFactsUnsupportedRoleError(Exception):
- """OpenShift Facts Unsupported Role Error"""
+ """Origin Facts Unsupported Role Error"""
pass
class OpenShiftFactsFileWriteError(Exception):
- """OpenShift Facts File Write Error"""
+ """Origin Facts File Write Error"""
pass
class OpenShiftFactsMetadataUnavailableError(Exception):
- """OpenShift Facts Metadata Unavailable Error"""
+ """Origin Facts Metadata Unavailable Error"""
pass
class OpenShiftFacts(object):
- """ OpenShift Facts
+ """ Origin Facts
Attributes:
- facts (dict): OpenShift facts for the host
+ facts (dict): facts for the host
Args:
role (str): role for setting local facts
@@ -717,10 +781,11 @@ class OpenShiftFacts(object):
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
+ facts = set_node_schedulability(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_registry_url_if_unset(facts)
facts = set_sdn_facts_if_unset(facts)
+ facts = set_deployment_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
return dict(openshift=facts)
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index b2cda3a85..6301d4fc0 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,10 +1,10 @@
---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
assert:
that:
- ansible_version | version_compare('1.8.0', 'ge')
- ansible_version | version_compare('1.9.0', 'ne')
- ansible_version | version_compare('1.9.0.1', 'ne')
-- name: Gather OpenShift facts
+- name: Gather Cluster facts
openshift_facts:
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 74e702248..637e494ea 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -1,25 +1,21 @@
- name: Wait for Node Registration
command: >
- {{ openshift.common.client_binary }} get node {{ item }}
+ {{ openshift.common.client_binary }} get node {{ item | lower }}
register: omd_get_node
until: omd_get_node.rc == 0
- retries: 10
+ retries: 20
delay: 5
with_items: openshift_nodes
-- name: Handle unscheduleable node
+- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=false
- with_items: openshift_unscheduleable_nodes
-
-- name: Handle scheduleable node
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true
- with_items: openshift_scheduleable_nodes
+ {{ openshift.common.admin_binary }} manage-node {{ item.openshift.common.hostname | lower }} --schedulable={{ 'true' if item.openshift.node.schedulable | bool else 'false' }}
+ with_items:
+ - "{{ openshift_node_vars }}"
- name: Label nodes
command: >
- {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }}
+ {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname | lower }} {{ item.openshift.node.labels | oo_combine_dict }}
with_items:
- "{{ openshift_node_vars }}"
when: "'labels' in item.openshift.node and item.openshift.node.labels != {}"
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 0e7ef3aab..155bdb58b 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -1,7 +1,7 @@
-OpenShift Master
-================
+OpenShift/Atomic Enterprise Master
+==================================
-OpenShift Master service installation
+Master service installation
Requirements
------------
@@ -15,8 +15,8 @@ Role Variables
From this role:
| Name | Default value | |
|-------------------------------------|-----------------------|--------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up |
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
+| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
| oreg_url | UNDEF | Default docker registry to use |
| openshift_master_api_port | UNDEF | |
| openshift_master_console_port | UNDEF | |
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index ca8860099..9766d01ae 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -5,11 +5,11 @@ openshift_node_ips: []
os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
-- service: OpenShift api https
+- service: api server https
port: 8443/tcp
-- service: OpenShift dns tcp
+- service: dns tcp
port: 53/tcp
-- service: OpenShift dns udp
+- service: dns udp
port: 53/udp
- service: Fluentd td-agent tcp
port: 24224/tcp
@@ -22,9 +22,9 @@ os_firewall_allow:
- service: Corosync UDP
port: 5405/udp
os_firewall_deny:
-- service: OpenShift api http
+- service: api server http
port: 8080/tcp
-- service: former OpenShift web console port
+- service: former web console port
port: 8444/tcp
- service: former etcd peer port
port: 7001/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index f1e7e1ab3..2981979e0 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,4 @@
---
-- name: restart openshift-master
- service: name=openshift-master state=restarted
+- name: restart master
+ service: name={{ openshift.common.service_type }}-master state=restarted
when: not openshift_master_ha | bool
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 41a183c3b..c125cb5d0 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: Jhon Honce
- description: OpenShift Master
+ description: Master
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.7
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 9204d25ce..73c04cb08 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -12,11 +12,7 @@
msg: "openshift_master_cluster_password must be set for multi-master installations"
when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
-- name: Install OpenShift Master package
- yum: pkg=openshift-master state=present
- register: install_result
-
-- name: Set master OpenShift facts
+- name: Set master facts
openshift_facts:
role: master
local_facts:
@@ -59,8 +55,26 @@
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
+- name: Install Master package
+ yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ register: install_result
+
+- name: Check for RPM generated config marker file /etc/origin/.rpmgenerated
+ stat: path=/etc/origin/.rpmgenerated
+ register: rpmgenerated_config
+
+- name: Remove RPM generated config files
+ file:
+ path: "{{ item }}"
+ state: absent
+ when: openshift.common.service_type in ['atomic-enterprise','openshift-enterprise'] and rpmgenerated_config.stat.exists == true
+ with_items:
+ - "{{ openshift.common.config_base }}/master"
+ - "{{ openshift.common.config_base }}/node"
+ - "{{ openshift.common.config_base }}/.rpmgenerated"
+
# TODO: These values need to be configurable
-- name: Set dns OpenShift facts
+- name: Set dns facts
openshift_facts:
role: dns
local_facts:
@@ -80,20 +94,28 @@
args:
creates: "{{ openshift_master_policy }}"
notify:
- - restart openshift-master
+ - restart master
- name: Create the scheduler config
template:
dest: "{{ openshift_master_scheduler_conf }}"
src: scheduler.json.j2
+ backup: true
notify:
- - restart openshift-master
+ - restart master
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: openshift.master.identity_providers
+- name: Ensure htpasswd directory exists
+ file:
+ path: "{{ item.filename | dirname }}"
+ state: directory
+ when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ with_items: openshift.master.identity_providers
+
- name: Create the htpasswd file if needed
copy:
dest: "{{ item.filename }}"
@@ -108,12 +130,13 @@
template:
dest: "{{ openshift_master_config_file }}"
src: master.yaml.v1.j2
+ backup: true
notify:
- - restart openshift-master
+ - restart master
-- name: Configure OpenShift settings
+- name: Configure master settings
lineinfile:
- dest: /etc/sysconfig/openshift-master
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
with_items:
@@ -122,10 +145,10 @@
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift_master_config_file }}"
notify:
- - restart openshift-master
+ - restart master
-- name: Start and enable openshift-master
- service: name=openshift-master enabled=yes state=started
+- name: Start and enable master
+ service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
register: start_result
@@ -146,20 +169,24 @@
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
when: install_result | changed
-- name: Create the OpenShift client config dir(s)
+- name: Lookup default group for ansible_ssh_user
+ command: "/usr/bin/id -g {{ ansible_ssh_user }}"
+ register: _ansible_ssh_user_gid
+
+- name: Create the client config dir(s)
file:
path: "~{{ item }}/.kube"
state: directory
mode: 0700
owner: "{{ item }}"
- group: "{{ item }}"
+ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items:
- root
- "{{ ansible_ssh_user }}"
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
-- name: Copy the OpenShift admin client config(s)
+- name: Copy the admin client config(s)
command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.kube/config
args:
creates: ~{{ item }}/.kube/config
@@ -167,13 +194,13 @@
- root
- "{{ ansible_ssh_user }}"
-- name: Update the permissions on the OpenShift admin client config(s)
+- name: Update the permissions on the admin client config(s)
file:
path: "~{{ item }}/.kube/config"
state: file
mode: 0700
owner: "{{ item }}"
- group: "{{ item }}"
+ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items:
- root
- "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index fff123d0d..cc1dee13d 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -46,7 +46,7 @@ etcdConfig:
certFile: etcd.server.crt
clientCA: ca.crt
keyFile: etcd.server.key
- storageDirectory: {{ openshift_data_dir }}/openshift.local.etcd
+ storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd
{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
@@ -87,7 +87,11 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+ {% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+ {% endif %}
+# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
+ serviceNetworkCIDR: {{ openshift.master.portal_net }}
{% include 'v1_partials/oauthConfig.j2' %}
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
diff --git a/roles/openshift_master/templates/scheduler.json.j2 b/roles/openshift_master/templates/scheduler.json.j2
index 835f2383e..cb5f43bb2 100644
--- a/roles/openshift_master/templates/scheduler.json.j2
+++ b/roles/openshift_master/templates/scheduler.json.j2
@@ -1,4 +1,6 @@
{
+ "kind": "Policy",
+ "apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
index 72889bc29..8a4f5a746 100644
--- a/roles/openshift_master/templates/v1_partials/oauthConfig.j2
+++ b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
@@ -80,6 +80,7 @@ oauthConfig:
provider:
{{ identity_provider_config(identity_provider) }}
{%- endfor %}
+ masterCA: ca.crt
masterPublicURL: {{ openshift.master.public_api_url }}
masterURL: {{ openshift.master.api_url }}
sessionConfig:
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index f6f69966a..ecdb4f883 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,8 +1,9 @@
---
-openshift_master_config_dir: /etc/openshift/master
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
openshift_master_valid_grant_methods:
- auto
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 03eb7e15f..5c9639ea5 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,6 @@
---
-- name: Install the OpenShift package for admin tooling
- yum: pkg=openshift state=present
+- name: Install the base package for admin tooling
+ yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
register: install_result
- name: Reload generated facts
diff --git a/roles/openshift_master_ca/vars/main.yml b/roles/openshift_master_ca/vars/main.yml
index 2925680bb..b35339b18 100644
--- a/roles/openshift_master_ca/vars/main.yml
+++ b/roles/openshift_master_ca/vars/main.yml
@@ -1,5 +1,6 @@
---
-openshift_master_config_dir: /etc/openshift/master
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index bb23cf4f4..0d75a9eb3 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -44,5 +44,3 @@
args:
creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt"
with_items: masters_needing_certs
-
-
diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml
index 6214f7918..3f18ddc79 100644
--- a/roles/openshift_master_certificates/vars/main.yml
+++ b/roles/openshift_master_certificates/vars/main.yml
@@ -1,3 +1,3 @@
---
-openshift_generated_configs_dir: /etc/openshift/generated-configs
-openshift_master_config_dir: /etc/openshift/master
+openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
index 8ddc8bfda..7ab9afb51 100644
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ b/roles/openshift_master_cluster/tasks/configure.yml
@@ -22,14 +22,14 @@
command: pcs resource defaults resource-stickiness=100
- name: Add the cluster VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group openshift-master
+ command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
- name: Add the cluster public VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group openshift-master
+ command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-- name: Add the cluster openshift-master service resource
- command: pcs resource create master systemd:openshift-master op start timeout=90s stop timeout=90s --group openshift-master
+- name: Add the cluster master service resource
+ command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
- name: Disable stonith
command: pcs property set stonith-enabled=false
diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml
index a80b6c5b4..3b416005b 100644
--- a/roles/openshift_master_cluster/tasks/configure_deferred.yml
+++ b/roles/openshift_master_cluster/tasks/configure_deferred.yml
@@ -1,8 +1,8 @@
---
- debug: msg="Deferring config"
-- name: Start and enable openshift-master
+- name: Start and enable the master
service:
- name: openshift-master
+ name: "{{ openshift.common.service_type }}-master"
state: started
enabled: yes
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 427269931..3aff81274 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -1,12 +1,12 @@
-OpenShift Node
-==============
+OpenShift/Atomic Enterprise Node
+================================
-OpenShift Node service installation
+Node service installation
Requirements
------------
-One or more OpenShift Master servers.
+One or more Master servers.
A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
@@ -14,10 +14,10 @@ rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
From this role:
-| Name | Default value | |
-|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|------------------------------------------|-----------------------|--------------------------------------------------------|
+| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
+| oreg_url | UNDEF (Optional) | Default docker registry to use |
From openshift_common:
| Name | Default Value | |
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 1dbcc4301..c4abf9d7c 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,6 +1,6 @@
---
os_firewall_allow:
-- service: OpenShift kubelet
+- service: Kubernetes kubelet
port: 10250/tcp
- service: http
port: 80/tcp
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 8b5acefbf..633f3ed13 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,6 @@
---
-- name: restart openshift-node
- service: name=openshift-node state=restarted
+- name: restart node
+ service: name={{ openshift.common.service_type }}-node state=restarted
- name: restart docker
service: name=docker state=restarted
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 7679adbf3..d45dd8073 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -10,16 +10,7 @@
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
-- name: Install OpenShift Node package
- yum: pkg=openshift-node state=present
- register: node_install_result
-
-- name: Install openshift-sdn-ovs
- yum: pkg=openshift-sdn-ovs state=present
- register: sdn_install_result
- when: openshift.common.use_openshift_sdn
-
-- name: Set node OpenShift facts
+- name: Set node facts
openshift_facts:
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
@@ -31,24 +22,38 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- labels: "{{ openshift_node_labels | default(none) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
annotations: "{{ openshift_node_annotations | default(none) }}"
registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+
+# We have to add tuned-profiles in the same transaction, otherwise we run into depsolving
+# problems because the rpms don't pin the version properly.
+- name: Install Node package
+ yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ register: node_install_result
+
+- name: Install sdn-ovs package
+ yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
+ register: sdn_install_result
+ when: openshift.common.use_openshift_sdn
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
template:
dest: "{{ openshift_node_config_file }}"
src: node.yaml.v1.j2
+ backup: true
notify:
- - restart openshift-node
+ - restart node
-- name: Configure OpenShift Node settings
+- name: Configure Node settings
lineinfile:
- dest: /etc/sysconfig/openshift-node
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
with_items:
@@ -57,13 +62,13 @@
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift_node_config_file }}"
notify:
- - restart openshift-node
+ - restart node
- stat: path=/etc/sysconfig/docker
register: docker_check
# TODO: Enable secure registry when code available in origin
-- name: Secure OpenShift Registry
+- name: Secure Registry
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
@@ -119,8 +124,8 @@
seboolean: name=virt_use_nfs state=yes persistent=yes
when: ansible_selinux and ansible_selinux.status == "enabled"
-- name: Start and enable openshift-node
- service: name=openshift-node enabled=yes state=started
+- name: Start and enable node
+ service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: start_result
- name: pause to prevent service restart from interfering with bootstrapping
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index e176e7511..4931d127e 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -12,13 +12,22 @@ kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
+{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
-nodeName: {{ openshift.common.hostname }}
+{% endif %}
+# The networkConfig struct was introduced in origin 1.0.6 and OSE 3.0.2; it
+# deprecates the networkPluginName setting above. The two should match.
+networkConfig:
+ mtu: {{ openshift.node.sdn_mtu }}
+{% if openshift.common.use_openshift_sdn %}
+ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% endif %}
+nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
bindAddress: 0.0.0.0:10250
certFile: server.crt
clientCA: ca.crt
keyFile: server.key
-volumeDirectory: {{ openshift_data_dir }}/openshift.local.volumes
-{% include 'partials/kubeletArguments.j2' %} \ No newline at end of file
+volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
+{% include 'partials/kubeletArguments.j2' %}
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
index cf47f8354..43dc50ca8 100644
--- a/roles/openshift_node/vars/main.yml
+++ b/roles/openshift_node/vars/main.yml
@@ -1,3 +1,4 @@
---
-openshift_node_config_dir: /etc/openshift/node
+openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md
index c6304e4b0..6264d253a 100644
--- a/roles/openshift_node_certificates/README.md
+++ b/roles/openshift_node_certificates/README.md
@@ -1,5 +1,5 @@
-OpenShift Node Certificates
-========================
+OpenShift/Atomic Enterprise Node Certificates
+=============================================
TODO
diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml
index a018bb0f9..61fbb1e51 100644
--- a/roles/openshift_node_certificates/vars/main.yml
+++ b/roles/openshift_node_certificates/vars/main.yml
@@ -1,7 +1,7 @@
---
-openshift_node_config_dir: /etc/openshift/node
-openshift_master_config_dir: /etc/openshift/master
-openshift_generated_configs_dir: /etc/openshift/generated-configs
+openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
diff --git a/roles/openshift_registry/vars/main.yml b/roles/openshift_registry/vars/main.yml
index 9fb501e85..9967e26f4 100644
--- a/roles/openshift_registry/vars/main.yml
+++ b/roles/openshift_registry/vars/main.yml
@@ -1,3 +1,2 @@
---
-openshift_master_config_dir: /etc/openshift/master
-
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index bbb4c77e7..319611a0b 100644
--- a/roles/openshift_repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
@@ -1,2 +1,7 @@
---
-known_openshift_deployment_types: ['origin', 'online', 'enterprise']
+# origin uses community packages named 'origin'
+# online currently uses 'openshift' packages
+# enterprise is used for OSE 3.0.x (< 3.1), which uses packages named 'openshift'
+# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
+# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
+known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml
index 9fb501e85..9967e26f4 100644
--- a/roles/openshift_router/vars/main.yml
+++ b/roles/openshift_router/vars/main.yml
@@ -1,3 +1,2 @@
---
-openshift_master_config_dir: /etc/openshift/master
-
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
new file mode 100644
index 000000000..d93a25a21
--- /dev/null
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -0,0 +1,26 @@
+- name: Create service account configs
+ template:
+ src: serviceaccount.j2
+ dest: "/tmp/{{ item }}-serviceaccount.yaml"
+ with_items: accounts
+
+- name: Create {{ item }} service account
+ command: >
+ {{ openshift.common.client_binary }} create -f "/tmp/{{ item }}-serviceaccount.yaml"
+ with_items: accounts
+ register: _sa_result
+ failed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc != 0"
+ changed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc == 0"
+
+- name: Get current security context constraints
+ shell: "{{ openshift.common.client_binary }} get scc privileged -o yaml > /tmp/scc.yaml"
+
+- name: Add security context constraint for {{ item }}
+ lineinfile:
+ dest: /tmp/scc.yaml
+ line: "- system:serviceaccount:default:{{ item }}"
+ insertafter: "^users:$"
+ with_items: accounts
+
+- name: Apply new scc rules for service accounts
+ command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml"
diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2
new file mode 100644
index 000000000..931e249f9
--- /dev/null
+++ b/roles/openshift_serviceaccounts/templates/serviceaccount.j2
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ item }}
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index e9f5814bb..ead81b876 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -21,4 +21,4 @@
template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json
with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
-# TODO - Get the json files to an openshift-master, and load them. \ No newline at end of file
+# TODO - Get the json files to a master, and load them.
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index 7111c778b..a503b24d7 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -7,10 +7,14 @@
state: list
register: templates
-- debug: var=templates
-
- include_vars: template_heartbeat.yml
- include_vars: template_os_linux.yml
+- include_vars: template_docker.yml
+- include_vars: template_openshift_master.yml
+- include_vars: template_openshift_node.yml
+- include_vars: template_ops_tools.yml
+- include_vars: template_app_zabbix_server.yml
+- include_vars: template_app_zabbix_agent.yml
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
@@ -28,3 +32,50 @@
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+- name: Include Template docker
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_docker }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template Openshift Master
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_openshift_master }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template Openshift Node
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_openshift_node }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template Ops Tools
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_ops_tools }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template App Zabbix Server
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_app_zabbix_server }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template App Zabbix Agent
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_app_zabbix_agent }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
diff --git a/roles/os_zabbix/vars/template_app_zabbix_agent.yml b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
new file mode 100644
index 000000000..06c4eda8b
--- /dev/null
+++ b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
@@ -0,0 +1,23 @@
+---
+g_template_app_zabbix_agent:
+ name: Template App Zabbix Agent
+ zitems:
+ - key: agent.hostname
+ applications:
+ - Zabbix agent
+ value_type: character
+ zabbix_type: '0'
+
+ - key: agent.ping
+ applications:
+ - Zabbix agent
+    description: The agent always returns 1 for this item. It can be used in combination with nodata() for availability checks.
+ value_type: int
+ zabbix_type: '0'
+
+ ztriggers:
+ - name: '[Reboot] Zabbix agent on {HOST.NAME} is unreachable for 15 minutes'
+ description: Zabbix agent is unreachable for 15 minutes.
+ expression: '{Template App Zabbix Agent:agent.ping.nodata(15m)}=1'
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_ping.asciidoc
diff --git a/roles/os_zabbix/vars/template_app_zabbix_server.yml b/roles/os_zabbix/vars/template_app_zabbix_server.yml
new file mode 100644
index 000000000..dace2aa29
--- /dev/null
+++ b/roles/os_zabbix/vars/template_app_zabbix_server.yml
@@ -0,0 +1,408 @@
+---
+g_template_app_zabbix_server:
+ name: Template App Zabbix Server
+ zitems:
+ - key: housekeeper_creates
+ applications:
+ - Zabbix server
+ description: A simple count of the number of partition creates output by the housekeeper script.
+ units: ''
+ value_type: int
+ zabbix_type: '2'
+
+ - key: housekeeper_drops
+ applications:
+ - Zabbix server
+ description: A simple count of the number of partition drops output by the housekeeper script.
+ units: ''
+ value_type: int
+ zabbix_type: '2'
+
+ - key: housekeeper_errors
+ applications:
+ - Zabbix server
+ description: A simple count of the number of errors output by the housekeeper script.
+ units: ''
+ value_type: int
+ zabbix_type: '2'
+
+ - key: housekeeper_total
+ applications:
+ - Zabbix server
+ description: A simple count of the total number of lines output by the housekeeper
+ script.
+ units: ''
+ value_type: int
+ zabbix_type: '2'
+
+ - key: zabbix[process,alerter,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,configuration syncer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,db watchdog,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,discoverer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,escalator,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,history syncer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,housekeeper,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,http poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,icmp pinger,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,ipmi poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,java poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,node watcher,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,proxy poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,self-monitoring,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,snmp trapper,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,timer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,trapper,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[process,unreachable poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[queue,10m]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: int
+ zabbix_type: '5'
+
+ - key: zabbix[queue]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: int
+ zabbix_type: '5'
+
+ - key: zabbix[rcache,buffer,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[wcache,history,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[wcache,text,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[wcache,trend,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: '5'
+
+ - key: zabbix[wcache,values]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: '5'
+ ztriggers:
+ - description: "There has been unexpected output while running the housekeeping script\
+      \ on the Zabbix server. There are only three kinds of lines we expect to see in the output,\
+      \ and we've gotten something new.\r\n\r\nCheck the script's output in /var/lib/zabbix/state\
+ \ for more details."
+ expression: '{Template App Zabbix Server:housekeeper_errors.last(0)}+{Template App Zabbix Server:housekeeper_creates.last(0)}+{Template App Zabbix Server:housekeeper_drops.last(0)}<>{Template App Zabbix Server:housekeeper_total.last(0)}'
+ name: Unexpected output in Zabbix DB Housekeeping
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_DB_Housekeeping.asciidoc
+
+  - description: An error occurred while running the housekeeping script on the Zabbix server. Check the script's output in /var/lib/zabbix/state for more details.
+ expression: '{Template App Zabbix Server:housekeeper_errors.last(0)}>0'
+ name: Errors during Zabbix DB Housekeeping
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,alerter,avg,busy].min(600)}>75'
+ name: Zabbix alerter processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,configuration syncer,avg,busy].min(600)}>75'
+ name: Zabbix configuration syncer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,db watchdog,avg,busy].min(600)}>75'
+ name: Zabbix db watchdog processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,discoverer,avg,busy].min(600)}>75'
+ name: Zabbix discoverer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,escalator,avg,busy].min(600)}>75'
+ name: Zabbix escalator processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,history syncer,avg,busy].min(600)}>75'
+ name: Zabbix history syncer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,housekeeper,avg,busy].min(1800)}>75'
+ name: Zabbix housekeeper processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,http poller,avg,busy].min(600)}>75'
+ name: Zabbix http poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,icmp pinger,avg,busy].min(600)}>75'
+ name: Zabbix icmp pinger processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,ipmi poller,avg,busy].min(600)}>75'
+ name: Zabbix ipmi poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,java poller,avg,busy].min(600)}>75'
+ name: Zabbix java poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,node watcher,avg,busy].min(600)}>75'
+ name: Zabbix node watcher processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,poller,avg,busy].min(600)}>75'
+ name: Zabbix poller processes more than 75% busy
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,proxy poller,avg,busy].min(600)}>75'
+ name: Zabbix proxy poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,self-monitoring,avg,busy].min(600)}>75'
+ name: Zabbix self-monitoring processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,snmp trapper,avg,busy].min(600)}>75'
+ name: Zabbix snmp trapper processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+  - description: Timer processes are usually busy because they have to process time-based
+      trigger functions
+ expression: '{Template App Zabbix Server:zabbix[process,timer,avg,busy].min(600)}>75'
+ name: Zabbix timer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,trapper,avg,busy].min(600)}>75'
+ name: Zabbix trapper processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,unreachable poller,avg,busy].min(600)}>75'
+ name: Zabbix unreachable poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: "This alert generally indicates a performance problem or a problem\
+ \ with the zabbix-server or proxy.\r\n\r\nThe first place to check for issues\
+ \ is Administration > Queue. Be sure to check the general view and the per-proxy\
+ \ view."
+ expression: '{Template App Zabbix Server:zabbix[queue,10m].min(600)}>1000'
+ name: More than 1000 items having missing data for more than 10 minutes
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/data_lost_overview_plugin.asciidoc
+
+ - description: Consider increasing CacheSize in the zabbix_server.conf configuration
+ file
+ expression: '{Template App Zabbix Server:zabbix[rcache,buffer,pfree].min(600)}<5'
+ name: Less than 5% free in the configuration cache
+ priority: info
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[wcache,history,pfree].min(600)}<25'
+ name: Less than 25% free in the history cache
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[wcache,text,pfree].min(600)}<25'
+ name: Less than 25% free in the text history cache
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[wcache,trend,pfree].min(600)}<25'
+ name: Less than 25% free in the trends cache
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
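
Every entry in the template above follows the same item/trigger pairing: the item is a Zabbix internal check (zabbix_type '5', i.e. the zabbix[...] keys the server reports about itself), and the matching trigger only fires when the metric breaches its threshold for the whole evaluation window (min(600) is ten consecutive minutes; the housekeeper uses min(1800)). A minimal sketch of that pairing, using the poller entries from above:

    zitems:
      - key: zabbix[process,poller,avg,busy]   # internal metric the server reports about itself
        applications:
          - Zabbix server
        units: '%'
        value_type: float
        zabbix_type: '5'                       # Zabbix item type 5 = internal check

    ztriggers:
      - name: Zabbix poller processes more than 75% busy
        # min(600) is the lowest sample over the last 10 minutes, so the pollers must stay
        # above 75% busy for the entire window before the alert fires.
        expression: '{Template App Zabbix Server:zabbix[process,poller,avg,busy].min(600)}>75'
        priority: high
        url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
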
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
new file mode 100644
index 000000000..395e054de
--- /dev/null
+++ b/roles/os_zabbix/vars/template_docker.yml
@@ -0,0 +1,89 @@
+---
+g_template_docker:
+ name: Template Docker
+ zitems:
+ - key: docker.ping
+ applications:
+ - Docker Daemon
+ value_type: int
+
+ - key: docker.storage.is_loopback
+ applications:
+ - Docker Storage
+ value_type: int
+
+ - key: docker.storage.data.space.total
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.data.space.used
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.data.space.available
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.data.space.percent_available
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.metadata.space.total
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.metadata.space.used
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.metadata.space.available
+ applications:
+ - Docker Storage
+ value_type: float
+
+ - key: docker.storage.metadata.space.percent_available
+ applications:
+ - Docker Storage
+ value_type: float
+ ztriggers:
+ - name: 'docker.ping failed on {HOST.NAME}'
+ expression: '{Template Docker:docker.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc'
+ priority: high
+
+ - name: 'Docker storage is using LOOPBACK on {HOST.NAME}'
+ expression: '{Template Docker:docker.storage.is_loopback.last()}<>0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_loopback.asciidoc'
+ priority: high
+
+ - name: 'Critically low docker storage data space on {HOST.NAME}'
+ expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.data.space.available.max(#3)}<5' # < 5% or < 5GB
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
+ priority: high
+
+ - name: 'Critically low docker storage metadata space on {HOST.NAME}'
+ expression: '{Template Docker:docker.storage.metadata.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.metadata.space.available.max(#3)}<0.005' # < 5% or < 5MB
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
+ priority: high
+
+ # Put triggers that depend on other triggers here (deps must be created first)
+ - name: 'Low docker storage data space on {HOST.NAME}'
+ expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.data.space.available.max(#3)}<10' # < 10% or < 10GB
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
+ dependencies:
+ - 'Critically low docker storage data space on {HOST.NAME}'
+ priority: average
+
+ - name: 'Low docker storage metadata space on {HOST.NAME}'
+ expression: '{Template Docker:docker.storage.metadata.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.metadata.space.available.max(#3)}<0.01' # < 10% or < 10MB
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
+ dependencies:
+ - 'Critically low docker storage metadata space on {HOST.NAME}'
+ priority: average
+
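Two details in the Docker triggers above are worth calling out. First, each storage trigger combines a relative and an absolute limit: percent_available is a percentage, while the inline comments indicate the raw space.available metrics are in GB, so <5 means roughly 5 GB of data space and <0.005 roughly 5 MB of metadata space. Second, the 'Low ...' warnings come after the 'Critically low ...' triggers because a trigger's dependencies must already exist when the dependent trigger is created. A condensed sketch of that critical/warning pair (values taken from the entries above):

    ztriggers:
      # parent trigger: created first so the warning below can depend on it
      - name: 'Critically low docker storage data space on {HOST.NAME}'
        expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.data.space.available.max(#3)}<5'
        priority: high

      # dependent warning: stays quiet while the critical trigger is already firing
      - name: 'Low docker storage data space on {HOST.NAME}'
        expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.data.space.available.max(#3)}<10'
        dependencies:
          - 'Critically low docker storage data space on {HOST.NAME}'
        priority: average
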
diff --git a/roles/os_zabbix/vars/template_heartbeat.yml b/roles/os_zabbix/vars/template_heartbeat.yml
index 3d0f3d51a..8dbe0d0d6 100644
--- a/roles/os_zabbix/vars/template_heartbeat.yml
+++ b/roles/os_zabbix/vars/template_heartbeat.yml
@@ -7,6 +7,7 @@ g_template_heartbeat:
- Heartbeat
key: heartbeat.ping
ztriggers:
- - description: 'Heartbeat.ping has failed on {HOST.NAME}'
+ - name: 'Heartbeat.ping has failed on {HOST.NAME}'
expression: '{Template Heartbeat:heartbeat.ping.nodata(20m)}=1'
priority: avg
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_node_heartbeat.asciidoc'
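
The heartbeat trigger relies on nodata() rather than on a value threshold: it fires when no heartbeat.ping sample has arrived for 20 minutes, which catches a dead agent or host as well as a failing check. A minimal sketch mirroring the entry above:

    ztriggers:
      - name: 'Heartbeat.ping has failed on {HOST.NAME}'
        # nodata(20m) evaluates to 1 when the item has received no data in the last 20 minutes
        expression: '{Template Heartbeat:heartbeat.ping.nodata(20m)}=1'
        priority: avg
        url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_node_heartbeat.asciidoc'
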
diff --git a/roles/os_zabbix/vars/template_host.yml b/roles/os_zabbix/vars/template_host.yml
deleted file mode 100644
index e7cc667cb..000000000
--- a/roles/os_zabbix/vars/template_host.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_host:
- params:
- name: Template Host
- host: Template Host
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Host
- zitems:
- - name: Host Ping
- hostid:
- key_: host.ping
- type: 2
- value_type: 0
- output: extend
- search:
- key_: host.ping
- ztriggers:
- - description: 'Host ping has failed on {HOST.NAME}'
- expression: '{Template Host:host.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Host ping has failed on*'
- expandExpression: True
diff --git a/roles/os_zabbix/vars/template_master.yml b/roles/os_zabbix/vars/template_master.yml
deleted file mode 100644
index 5f9b41a4f..000000000
--- a/roles/os_zabbix/vars/template_master.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_master:
- params:
- name: Template Master
- host: Template Master
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Master
- zitems:
- - name: Master Etcd Ping
- hostid:
- key_: master.etcd.ping
- type: 2
- value_type: 0
- output: extend
- search:
- key_: master.etcd.ping
- ztriggers:
- - description: 'Master Etcd ping has failed on {HOST.NAME}'
- expression: '{Template Master:master.etcd.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Master Etcd ping has failed on*'
- expandExpression: True
diff --git a/roles/os_zabbix/vars/template_node.yml b/roles/os_zabbix/vars/template_node.yml
deleted file mode 100644
index 98c343a24..000000000
--- a/roles/os_zabbix/vars/template_node.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_node:
- params:
- name: Template Node
- host: Template Node
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Node
- zitems:
- - name: Kubelet Ping
- hostid:
- key_: kubelet.ping
- type: 2
- value_type: 0
- output: extend
- search:
- key_: kubelet.ping
- ztriggers:
- - description: 'Kubelet ping has failed on {HOST.NAME}'
- expression: '{Template Node:kubelet.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Kubelet ping has failed on*'
- expandExpression: True
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
new file mode 100644
index 000000000..1de4fefbb
--- /dev/null
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -0,0 +1,58 @@
+---
+g_template_openshift_master:
+ name: Template Openshift Master
+ zitems:
+ - name: create_app
+ applications:
+ - Openshift Master
+ key: create_app
+
+ - key: openshift.master.process.count
+ description: Shows number of master processes running
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.user.count
+ description: Shows number of users in a cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pod.running.count
+ description: Shows number of pods running
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.project.counter
+ description: Shows number of projects on a cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ ztriggers:
+ - name: 'Application creation has failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
+ priority: avg
+
+ - name: 'Openshift Master process not running on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: high
+
+ - name: 'Too many Openshift Master processes running on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.process.count.min(#3)}>1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: high
+
+  - name: 'No users reported for Openshift Master on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: info
+
+ - name: 'There are no projects running on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.project.counter.last()}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: info
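
The create_app trigger only fires when the two most recent samples are both 1: last(#1) is the newest value and last(#2) the one before it, so a single failed application-creation check does not page anyone. The process-count triggers use the same sampling idea over three values: max(#3)<1 means no master process was seen in any of the last three samples, while min(#3)>1 means more than one was seen in all of them. A brief annotated sketch of the two-consecutive-failures pattern, reusing the create_app item above:

    ztriggers:
      - name: 'Application creation has failed on {HOST.NAME}'
        # both of the last two samples must report a failure (1) before alerting
        expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
        url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
        priority: avg
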
diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml
new file mode 100644
index 000000000..ce28b1048
--- /dev/null
+++ b/roles/os_zabbix/vars/template_openshift_node.yml
@@ -0,0 +1,44 @@
+---
+g_template_openshift_node:
+ name: Template Openshift Node
+ zitems:
+ - key: openshift.node.process.count
+ description: Shows number of OpenShift Node processes running
+ type: int
+ applications:
+ - Openshift Node
+
+ - key: openshift.node.ovs.pids.count
+    description: Shows number of OVS process IDs running
+ type: int
+ applications:
+ - Openshift Node
+
+ - key: openshift.node.ovs.ports.count
+ description: Shows number of OVS ports defined
+ type: int
+ applications:
+ - Openshift Node
+
+ ztriggers:
+ - name: 'Openshift Node process not running on {HOST.NAME}'
+ expression: '{Template Openshift Node:openshift.node.process.count.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
+ priority: high
+
+ - name: 'Too many Openshift Node processes running on {HOST.NAME}'
+ expression: '{Template Openshift Node:openshift.node.process.count.min(#3)}>1'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
+ priority: high
+
+ - name: 'OVS may not be running on {HOST.NAME}'
+ expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last()}<>4'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
+ priority: high
+
+ - name: 'Number of OVS ports is 0 on {HOST.NAME}'
+ expression: '{Template Openshift Node:openshift.node.ovs.ports.count.last()}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
+ priority: high
+
+
diff --git a/roles/os_zabbix/vars/template_ops_tools.yml b/roles/os_zabbix/vars/template_ops_tools.yml
new file mode 100644
index 000000000..d1b8a2514
--- /dev/null
+++ b/roles/os_zabbix/vars/template_ops_tools.yml
@@ -0,0 +1,23 @@
+---
+g_template_ops_tools:
+ name: Template Operations Tools
+ zdiscoveryrules:
+ - name: disc.ops.runner
+ key: disc.ops.runner
+ lifetime: 1
+ description: "Dynamically register operations runner items"
+
+ zitemprototypes:
+ - discoveryrule_key: disc.ops.runner
+ name: "Exit code of ops-runner[{#OSO_COMMAND}]"
+ key: "disc.ops.runner.command.exitcode[{#OSO_COMMAND}]"
+ value_type: int
+ description: "The exit code of the command run from ops-runner"
+ applications:
+ - Ops Runner
+
+ ztriggerprototypes:
+ - name: 'ops-runner[{#OSO_COMMAND}]: non-zero exit code on {HOST.NAME}'
+ expression: '{Template Operations Tools:disc.ops.runner.command.exitcode[{#OSO_COMMAND}].last()}<>0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_ops_runner_command.asciidoc'
+ priority: average
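
The ops-runner entries use Zabbix low-level discovery: the disc.ops.runner rule is expected to return values for the {#OSO_COMMAND} macro, and for every discovered command the item and trigger prototypes are instantiated with the macro substituted. As an illustration only (the command name example-command is hypothetical), a discovered command would expand the prototypes roughly like this:

    # item created from the zitemprototype
    - name: "Exit code of ops-runner[example-command]"
      key: "disc.ops.runner.command.exitcode[example-command]"
      value_type: int

    # trigger created from the ztriggerprototype
    - name: 'ops-runner[example-command]: non-zero exit code on {HOST.NAME}'
      expression: '{Template Operations Tools:disc.ops.runner.command.exitcode[example-command].last()}<>0'
      priority: average
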
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 1c9d10bb0..69432273f 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -10,17 +10,20 @@ g_template_os_linux:
- key: kernel.all.cpu.wait.total
applications:
- Kernel
- value_type: int
+ value_type: float
+ units: '%'
- key: kernel.all.cpu.irq.hard
applications:
- Kernel
- value_type: int
+ value_type: float
+ units: '%'
- key: kernel.all.cpu.idle
applications:
- Kernel
- value_type: int
+ value_type: float
+ units: '%'
- key: kernel.uname.distro
applications:
@@ -35,7 +38,8 @@ g_template_os_linux:
- key: kernel.all.cpu.irq.soft
applications:
- Kernel
- value_type: int
+ value_type: float
+ units: '%'
- key: kernel.all.load.15_minute
applications:
@@ -45,32 +49,19 @@ g_template_os_linux:
- key: kernel.all.cpu.sys
applications:
- Kernel
- value_type: int
+ value_type: float
+ units: '%'
- key: kernel.all.load.5_minute
applications:
- Kernel
value_type: float
- - key: mem.freemem
- applications:
- - Memory
- value_type: int
-
- key: kernel.all.cpu.nice
applications:
- Kernel
- value_type: int
-
- - key: mem.util.bufmem
- applications:
- - Memory
- value_type: int
-
- - key: swap.used
- applications:
- - Memory
- value_type: int
+ value_type: float
+ units: '%'
- key: kernel.all.load.1_minute
applications:
@@ -82,92 +73,188 @@ g_template_os_linux:
- Kernel
value_type: string
- - key: swap.length
+ - key: kernel.all.uptime
applications:
- - Memory
+ - Kernel
value_type: int
- - key: mem.physmem
+ - key: kernel.all.cpu.user
applications:
- - Memory
- value_type: int
+ - Kernel
+ value_type: float
+ units: '%'
- - key: kernel.all.uptime
+ - key: kernel.uname.machine
applications:
- Kernel
- value_type: int
+ value_type: string
- - key: swap.free
+ - key: hinv.ncpu
applications:
- - Memory
+ - Kernel
value_type: int
- - key: mem.util.used
+ - key: kernel.all.cpu.steal
applications:
- - Memory
- value_type: int
+ - Kernel
+ value_type: float
+ units: '%'
- - key: kernel.all.cpu.user
+ - key: kernel.all.pswitch
applications:
- Kernel
value_type: int
- - key: kernel.uname.machine
+ - key: kernel.uname.release
applications:
- Kernel
value_type: string
- - key: hinv.ncpu
+ - key: proc.nprocs
applications:
- Kernel
value_type: int
- - key: mem.util.cached
+ # Memory Items
+ - key: mem.freemem
applications:
- Memory
value_type: int
+ description: "PCP: free system memory metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: kernel.all.cpu.steal
+ - key: mem.util.bufmem
applications:
- - Kernel
+ - Memory
value_type: int
+    description: "PCP: Memory allocated for buffer_heads; I/O buffers metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: kernel.all.pswitch
+ - key: swap.used
applications:
- - Kernel
+ - Memory
value_type: int
+ description: "PCP: swap used metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: kernel.uname.release
+ - key: swap.length
applications:
- - Kernel
- value_type: string
+ - Memory
+ value_type: int
+ description: "PCP: total swap available metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: proc.nprocs
+ - key: mem.physmem
applications:
- - Kernel
+ - Memory
value_type: int
+ description: "PCP: The value of this metric corresponds to the \"MemTotal\" field reported by /proc/meminfo. Note that this does not necessarily correspond to actual installed physical memory - there may be areas of the physical address space mapped as ROM in various peripheral devices and the bios may be mirroring certain ROMs in RAM."
+ multiplier: 1024
+ units: B
- - key: filesys.avail
+ - key: swap.free
applications:
- - Disk
+ - Memory
value_type: int
+ description: "PCP: swap free metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: filesys.capacity
+ - key: mem.util.available
applications:
- - Disk
+ - Memory
value_type: int
+    description: "PCP: The amount of memory that is available for a new workload, without pushing the system into swap. Estimated from MemFree, Active(file), Inactive(file), and SReclaimable, as well as the \"low\" watermarks from /proc/zoneinfo; available memory from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: filesys.free
+ - key: mem.util.used
applications:
- - Disk
+ - Memory
value_type: int
+ description: "PCP: Used memory is the difference between mem.physmem and mem.freemem; used memory metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
- - key: filesys.full
+ - key: mem.util.cached
applications:
- - Disk
+ - Memory
+ value_type: int
+    description: "PCP: Memory used by the page cache, including buffered file data. This is in-memory cache for files read from the disk (the pagecache) but doesn't include SwapCached; page cache metric from /proc/meminfo"
+ multiplier: 1024
+ units: B
+
+ zdiscoveryrules:
+ - name: disc.filesys
+ key: disc.filesys
+ lifetime: 1
+ description: "Dynamically register the filesystems"
+
+ zitemprototypes:
+ - discoveryrule_key: disc.filesys
+ name: "disc.filesys.full.{#OSO_FILESYS}"
+ key: "disc.filesys.full[{#OSO_FILESYS}]"
value_type: float
-
- - key: filesys.used
+    description: "PCP filesys.full metric. This is the percent full returned by pcp's filesys.full."
applications:
- Disk
+
+ - discoveryrule_key: disc.filesys
+ name: "Percentage of used inodes on {#OSO_FILESYS}"
+ key: "disc.filesys.inodes.pused[{#OSO_FILESYS}]"
value_type: float
+ description: "PCP derived value of percentage of used inodes on a filesystem."
+ applications:
+ - Disk
+
+ ztriggerprototypes:
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: warn
+
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: high
+
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: warn
+
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: high
+
+ ztriggers:
+ - name: 'Too many TOTAL processes on {HOST.NAME}'
+ expression: '{Template OS Linux:proc.nprocs.last()}>5000'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_proc.asciidoc'
+ priority: warn
+
+ - name: 'Lack of available memory on {HOST.NAME}'
+ expression: '{Template OS Linux:mem.freemem.last()}<30720000'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_memory.asciidoc'
+ priority: warn
+    description: 'Alert when free memory drops below 30 megabytes (30000 KB x 1024 = 30,720,000 bytes).'
+
+ # CPU Utilization #
+ - name: 'CPU idle less than 5% on {HOST.NAME}'
+ expression: '{Template OS Linux:kernel.all.cpu.idle.last()}<5 and {Template OS Linux:kernel.all.cpu.idle.last(#2)}<5'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
+ priority: high
+ description: 'CPU is less than 5% idle'
+
+ - name: 'CPU idle less than 10% on {HOST.NAME}'
+ expression: '{Template OS Linux:kernel.all.cpu.idle.last()}<10 and {Template OS Linux:kernel.all.cpu.idle.last(#2)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
+ priority: warn
+ description: 'CPU is less than 10% idle'
+ dependencies:
+ - 'CPU idle less than 5% on {HOST.NAME}'
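
A note on the memory items above: the PCP values come from /proc/meminfo, which reports kilobytes, and multiplier: 1024 together with units: B stores them as bytes so Zabbix can render human-readable sizes. That is also why the free-memory trigger compares against 30720000: 30000 KB x 1024 = 30,720,000 bytes, roughly 30 MB. A minimal sketch, taken from the mem.freemem entries above:

    zitems:
      - key: mem.freemem
        applications:
          - Memory
        value_type: int
        multiplier: 1024   # /proc/meminfo reports KB; store the value in bytes
        units: B           # lets Zabbix display KB/MB/GB automatically

    ztriggers:
      - name: 'Lack of available memory on {HOST.NAME}'
        expression: '{Template OS Linux:mem.freemem.last()}<30720000'   # 30000 KB x 1024 bytes, ~30 MB
        priority: warn
        url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_memory.asciidoc'
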
diff --git a/roles/os_zabbix/vars/template_router.yml b/roles/os_zabbix/vars/template_router.yml
deleted file mode 100644
index 4dae7da1e..000000000
--- a/roles/os_zabbix/vars/template_router.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_router:
- params:
- name: Template Router
- host: Template Router
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Router
- zitems:
- - name: Router Backends down
- hostid:
- key_: router.backends.down
- type: 2
- value_type: 0
- output: extend
- search:
- key_: router.backends.down
- ztriggers:
- - description: 'Number of router backends down on {HOST.NAME}'
- expression: '{Template Router:router.backends.down.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Number of router backends down on {HOST.NAME}'
- expandExpression: True