Diffstat (limited to 'roles')
-rw-r--r--  roles/chrony/README.md | 31
-rw-r--r--  roles/chrony/defaults/main.yml | 2
-rw-r--r--  roles/chrony/handlers/main.yml | 5
-rw-r--r--  roles/chrony/meta/main.yml | 18
-rw-r--r--  roles/chrony/tasks/main.yml | 30
-rw-r--r--  roles/chrony/templates/chrony.conf.j2 | 45
-rw-r--r--  roles/chrony/vars/main.yml | 2
-rw-r--r--  roles/cockpit/tasks/main.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 16
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 4
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 4
-rw-r--r--  roles/fluentd_node/tasks/main.yml | 5
-rw-r--r--  roles/haproxy/defaults/main.yml | 4
-rw-r--r--  roles/haproxy/handlers/main.yml | 1
-rw-r--r--  roles/haproxy/tasks/main.yml | 5
-rw-r--r--  roles/lib_timedatectl/library/timedatectl.py | 74
-rw-r--r--  roles/lib_zabbix/library/zbx_action.py | 147
-rw-r--r--  roles/lib_zabbix/library/zbx_host.py | 21
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 1
-rw-r--r--  roles/nickhammond.logrotate/tasks/main.yml | 1
-rw-r--r--  roles/nuage_master/README.md | 8
-rw-r--r--  roles/nuage_master/files/serviceaccount.sh | 63
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 18
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 34
-rw-r--r--  roles/nuage_master/templates/nuagekubemon.j2 | 19
-rw-r--r--  roles/nuage_master/vars/main.yaml | 7
-rw-r--r--  roles/nuage_node/README.md | 9
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 8
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 37
-rw-r--r--  roles/nuage_node/templates/vsp-k8s.j2 | 14
-rw-r--r--  roles/nuage_node/vars/main.yaml | 9
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 19
-rw-r--r--  roles/openshift_common/tasks/main.yml | 13
-rw-r--r--  roles/openshift_common/vars/main.yml | 1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 133
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 13
-rw-r--r--  roles/openshift_master/defaults/main.yml | 6
-rw-r--r--  roles/openshift_master/handlers/main.yml | 13
-rw-r--r--  roles/openshift_master/tasks/main.yml | 161
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-controllers.j2) | 4
l---------  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 | 1
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2) | 2
l---------  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 | 1
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2) | 0
-rw-r--r--  roles/openshift_master/templates/docker/master.docker.service.j2 (renamed from roles/openshift_master/templates/master.docker.service.j2) | 0
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 23
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 9
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-api.service.j2) | 0
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-api.j2) | 4
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2) | 0
-rw-r--r--  roles/openshift_master/vars/main.yml | 5
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 10
-rw-r--r--  roles/openshift_master_cluster/tasks/configure.yml | 3
-rw-r--r--  roles/openshift_node/tasks/main.yml | 44
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 4
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r--  roles/os_firewall/defaults/main.yml | 1
-rw-r--r--  roles/os_firewall/tasks/main.yml | 4
-rw-r--r--  roles/os_zabbix/tasks/main.yml | 18
-rw-r--r--  roles/os_zabbix/vars/template_config_loop.yml | 14
-rw-r--r--  roles/os_zabbix/vars/template_docker.yml | 2
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 26
-rw-r--r--  roles/oso_host_monitoring/handlers/main.yml | 6
-rw-r--r--  roles/oso_host_monitoring/tasks/main.yml | 20
-rw-r--r--  roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 | 43
-rw-r--r--  roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 (renamed from roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2) | 50
-rw-r--r--  roles/oso_monitoring_tools/README.md | 54
-rw-r--r--  roles/oso_monitoring_tools/defaults/main.yml | 2
-rw-r--r--  roles/oso_monitoring_tools/handlers/main.yml | 2
-rw-r--r--  roles/oso_monitoring_tools/meta/main.yml | 8
-rw-r--r--  roles/oso_monitoring_tools/tasks/main.yml | 18
-rw-r--r--  roles/oso_monitoring_tools/vars/main.yml | 12
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 18
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 2
74 files changed, 1033 insertions(+), 386 deletions(-)
diff --git a/roles/chrony/README.md b/roles/chrony/README.md
new file mode 100644
index 000000000..bf15d9669
--- /dev/null
+++ b/roles/chrony/README.md
@@ -0,0 +1,31 @@
+Chrony
+======
+
+A role to configure chrony as the NTP client
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+chrony_ntp_servers: a list of NTP servers to use in the chrony.conf file
+
+Dependencies
+------------
+
+roles/lib_timedatectl
+
+Example Playbook
+----------------
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+OpenShift Operations
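
The Example Playbook section above is left empty; a minimal sketch of an
invocation (host pattern and server list are hypothetical) might look like:

    ---
    # Hypothetical playbook applying the chrony role; chrony_ntp_servers is
    # the role variable documented above.
    - hosts: all
      roles:
      - role: chrony
        chrony_ntp_servers:
        - 0.pool.ntp.org
        - 1.pool.ntp.org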
diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml
new file mode 100644
index 000000000..95576e666
--- /dev/null
+++ b/roles/chrony/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for chrony
diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml
new file mode 100644
index 000000000..1973c79e2
--- /dev/null
+++ b/roles/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart chronyd
+  service:
+    name: chronyd
+    state: restarted
diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml
new file mode 100644
index 000000000..85595d7c3
--- /dev/null
+++ b/roles/chrony/meta/main.yml
@@ -0,0 +1,18 @@
+---
+galaxy_info:
+  author: OpenShift Operations
+  description: Configure chrony as an NTP client
+  company: Red Hat
+  license: Apache 2.0
+  min_ansible_version: 1.9.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  - name: Fedora
+    versions:
+    - all
+  categories:
+  - system
+dependencies:
+- roles/lib_timedatectl
diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml
new file mode 100644
index 000000000..fae6d8e4c
--- /dev/null
+++ b/roles/chrony/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: Remove the ntp package
+  yum:
+    name: ntp
+    state: absent
+
+- name: Ensure the chrony package is installed
+  yum:
+    name: chrony
+    state: installed
+
+- name: Install /etc/chrony.conf
+  template:
+    src: chrony.conf.j2
+    dest: /etc/chrony.conf
+    owner: root
+    group: root
+    mode: 0644
+  notify:
+  - Restart chronyd
+
+- name: Enable ntp via timedatectl set-ntp yes
+  timedatectl:
+    ntp: True
+
+- name: Start and enable chronyd
+  service:
+    name: chronyd
+    state: started
+    enabled: yes
diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2
new file mode 100644
index 000000000..de43b6364
--- /dev/null
+++ b/roles/chrony/templates/chrony.conf.j2
@@ -0,0 +1,45 @@
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% for server in chrony_ntp_servers %}
+server {{ server }} iburst
+{% endfor %}
+
+# Ignore stratum in source selection.
+stratumweight 0
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Enable kernel RTC synchronization.
+rtcsync
+
+# Step the system clock instead of slewing it during the first three
+# updates if the adjustment is larger than 10 seconds.
+makestep 10 3
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Listen for commands only on localhost.
+bindcmdaddress 127.0.0.1
+bindcmdaddress ::1
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+keyfile /etc/chrony.keys
+
+# Specify the key used as password for chronyc.
+commandkey 1
+
+# Generate command key if missing.
+generatecommandkey
+
+# Disable logging of client accesses.
+noclientlog
+
+# Send a message to syslog if a clock adjustment is larger than 0.5 seconds.
+logchange 0.5
+
+logdir /var/log/chrony
+#log measurements statistics tracking
diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml
new file mode 100644
index 000000000..061a21547
--- /dev/null
+++ b/roles/chrony/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for chrony
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 6e9f3a8bd..e83f72a3d 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -6,7 +6,7 @@
- cockpit-shell
- cockpit-bridge
- "{{ cockpit_plugins }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Enable cockpit-ws
service:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index e83cfc33c..1e97b047b 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -11,24 +11,8 @@
action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
when: not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
- when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
-
-- name: Wait for etcd image
- command: >
- docker images
- register: docker_images
- until: openshift.etcd.etcd_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
- name: Install etcd container service file
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index d32f5e48c..cf7bc00a3 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,4 +1,8 @@
---
+- name: Install openssl
+ action: "{{ ansible_pkg_mgr }} name=openssl state=present"
+ when: not openshift.common.is_atomic | bool
+
- file:
path: "{{ item }}"
state: directory
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 1c87d562a..32f972f0a 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -1,12 +1,12 @@
---
- fail:
msg: "fluentd master is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
+ when: openshift.common.is_atomic | bool
# TODO: Update fluentd install and configuration when packaging is complete
- name: download and install td-agent
action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index 8d34c0b19..9fd908687 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -1,12 +1,12 @@
---
- fail:
msg: "fluentd node is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
+ when: openshift.common.is_atomic | bool
# TODO: Update fluentd install and configuration when packaging is complete
- name: download and install td-agent
action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
@@ -55,4 +55,3 @@
name: 'td-agent'
state: started
enabled: yes
-
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml
index 7ba5bd485..937d94209 100644
--- a/roles/haproxy/defaults/main.yml
+++ b/roles/haproxy/defaults/main.yml
@@ -1,4 +1,6 @@
---
+haproxy_frontend_port: 80
+
haproxy_frontends:
- name: main
binds:
@@ -18,4 +20,4 @@ os_firewall_allow:
- service: haproxy stats
port: "9000/tcp"
- service: haproxy balance
- port: "8443/tcp"
+ port: "{{ haproxy_frontend_port }}/tcp"
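
Since the balancer port is now driven by haproxy_frontend_port, restoring the
previously hard-coded 8443 is a one-line inventory override; a hypothetical
group_vars entry:

    # Illustrative override for the haproxy role; 8443 matches the old default.
    haproxy_frontend_port: 8443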
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
index ee60adcab..5b8691b26 100644
--- a/roles/haproxy/handlers/main.yml
+++ b/roles/haproxy/handlers/main.yml
@@ -3,3 +3,4 @@
service:
name: haproxy
state: restarted
+ when: not (haproxy_start_result_changed | default(false) | bool)
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 97f870829..0b8370ce2 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -19,6 +19,5 @@
enabled: yes
register: start_result
-- name: Pause 30 seconds if haproxy was just started
- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ haproxy_start_result_changed: "{{ start_result | changed }}"
diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py
new file mode 100644
index 000000000..b6eab5918
--- /dev/null
+++ b/roles/lib_timedatectl/library/timedatectl.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+'''
+ timedatectl ansible module
+
+ This module supports setting ntp enabled
+'''
+import subprocess
+
+
+
+
+def do_timedatectl(options=None):
+    ''' subprocess timedatectl '''
+
+    cmd = ['/usr/bin/timedatectl']
+    if options:
+        cmd += options.split()
+
+    proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE)
+    proc.wait()
+    return proc.stdout.read()
+
+def main():
+    ''' Ansible module for timedatectl
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            #state=dict(default='enabled', type='str'),
+            ntp=dict(default=True, type='bool'),
+        ),
+        #supports_check_mode=True
+    )
+
+    # do something
+    ntp_enabled = False
+
+    results = do_timedatectl()
+
+    for line in results.split('\n'):
+        if 'NTP enabled' in line:
+            if 'yes' in line:
+                ntp_enabled = True
+
+    ########
+    # Enable NTP
+    ########
+    if module.params['ntp']:
+        if ntp_enabled:
+            module.exit_json(changed=False, results="enabled", state="enabled")
+
+        # Enable it
+        # Commands to enable ntp
+        else:
+            results = do_timedatectl('set-ntp yes')
+            module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results)
+
+    #########
+    # Disable NTP
+    #########
+    else:
+        if not ntp_enabled:
+            module.exit_json(changed=False, results="disabled", state="disabled")
+
+        results = do_timedatectl('set-ntp no')
+        module.exit_json(changed=True, results="disabled", state="disabled")
+
+    module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown")
+
+# Pylint is getting in the way of basic Ansible
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import *
+
+main()
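
As a usage sketch, a task driving this module (the chrony role above invokes it
the same way; the task name and register variable are hypothetical):

    # Hypothetical task: turn NTP synchronization off via the timedatectl module.
    - name: Disable ntp via timedatectl
      timedatectl:
        ntp: no
      register: timedatectl_result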
diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py
index c08bef4f7..2f9524556 100644
--- a/roles/lib_zabbix/library/zbx_action.py
+++ b/roles/lib_zabbix/library/zbx_action.py
@@ -81,6 +81,61 @@ def filter_differences(zabbix_filters, user_filters):
return rval
+def opconditions_diff(zab_val, user_val):
+    ''' Report whether there are differences between opconditions on
+        zabbix and opconditions supplied by user '''
+
+    if len(zab_val) != len(user_val):
+        return True
+
+    for z_cond, u_cond in zip(zab_val, user_val):
+        if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
+                    ['conditiontype', 'operator', 'value']]):
+            return True
+
+    return False
+
+def opmessage_diff(zab_val, user_val):
+    ''' Report whether there are differences between opmessage on
+        zabbix and opmessage supplied by user '''
+
+    for op_msg_key, op_msg_val in user_val.items():
+        if zab_val[op_msg_key] != str(op_msg_val):
+            return True
+
+    return False
+
+def opmessage_grp_diff(zab_val, user_val):
+    ''' Report whether there are differences between opmessage_grp
+        on zabbix and opmessage_grp supplied by user '''
+
+    zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val])
+    usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val])
+    if usr_grp_ids != zab_grp_ids:
+        return True
+
+    return False
+
+def opmessage_usr_diff(zab_val, user_val):
+    ''' Report whether there are differences between opmessage_usr
+        on zabbix and opmessage_usr supplied by user '''
+
+    zab_usr_ids = set([usr['usrid'] for usr in zab_val])
+    usr_ids = set([usr['usrid'] for usr in user_val])
+    if usr_ids != zab_usr_ids:
+        return True
+
+    return False
+
+def opcommand_diff(zab_op_cmd, usr_op_cmd):
+    ''' Check whether user-provided opcommand matches what's already
+        stored in Zabbix '''
+
+    for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
+        if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
+            return True
+    return False
+
def host_in_zabbix(zab_hosts, usr_host):
''' Check whether a particular user host is already in the
Zabbix list of hosts '''
@@ -106,23 +161,11 @@ def hostlist_in_zabbix(zab_hosts, usr_hosts):
return True
-def opcommand_diff(zab_op_cmd, usr_op_cmd):
- ''' Check whether user-provided opcommand matches what's already
- stored in Zabbix '''
-
- for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
- if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
- return True
- return False
-
-# This logic is quite complex. We are comparing two lists of dictionaries.
-# The outer for-loops allow us to descend down into both lists at the same time
-# and then walk over the key,val pairs of the incoming user dict's changes
-# or updates. The if-statements are looking at different sub-object types and
-# comparing them. The other suggestion on how to write this is to write a recursive
-# compare function but for the time constraints and for complexity I decided to go
-# this route.
-# pylint: disable=too-many-branches
+# We are comparing two lists of dictionaries (the one stored on zabbix and the
+# one the user is providing). For each type of operation, determine whether there
+# is a difference between what is stored on zabbix and what the user is providing.
+# If there is a difference, we take the user-provided data for what needs to
+# be stored/updated into zabbix.
def operation_differences(zabbix_ops, user_ops):
'''Determine the differences from user and zabbix for operations'''
@@ -132,49 +175,41 @@ def operation_differences(zabbix_ops, user_ops):
rval = {}
for zab, user in zip(zabbix_ops, user_ops):
- for key, val in user.items():
- if key == 'opconditions':
- if len(zab[key]) != len(val):
- rval[key] = val
- break
- for z_cond, u_cond in zip(zab[key], user[key]):
- if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
- ['conditiontype', 'operator', 'value']]):
- rval[key] = val
- break
- elif key == 'opmessage':
- # Verify each passed param matches
- for op_msg_key, op_msg_val in val.items():
- if zab[key][op_msg_key] != str(op_msg_val):
- rval[key] = val
- break
-
- elif key == 'opmessage_grp':
- zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]])
- usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val])
- if usr_grp_ids != zab_grp_ids:
- rval[key] = val
-
- elif key == 'opmessage_usr':
- zab_usr_ids = set([usr['userid'] for usr in zab[key]])
- usr_ids = set([usr['userid'] for usr in val])
- if usr_ids != zab_usr_ids:
- rval[key] = val
-
- elif key == 'opcommand':
- if opcommand_diff(zab[key], val):
- rval[key] = val
- break
+ for oper in user.keys():
+ if oper == 'opconditions' and opconditions_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opmessage' and opmessage_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opcommand' and opcommand_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
# opcommand_grp can be treated just like opcommand_hst
# as opcommand_grp[] is just a list of groups
- elif key == 'opcommand_hst' or key == 'opcommand_grp':
- if not hostlist_in_zabbix(zab[key], val):
- rval[key] = val
- break
+ elif oper == 'opcommand_hst' or oper == 'opcommand_grp':
+ if not hostlist_in_zabbix(zab[oper], user[oper]):
+ rval[oper] = user[oper]
+
+ # if it's any other type of operation than the ones tested above
+ # just do a direct compare
+ elif oper not in ['opconditions', 'opmessage', 'opmessage_grp',
+ 'opmessage_usr', 'opcommand', 'opcommand_hst',
+ 'opcommand_grp'] \
+ and str(zab[oper]) != str(user[oper]):
+ rval[oper] = user[oper]
- elif zab[key] != str(val):
- rval[key] = val
return rval
def get_users(zapi, users):
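
To make the comparison concrete, a hypothetical user-supplied operation (as it
would be passed to zbx_action from an Ansible task) carrying the fields the
helpers above diff against Zabbix might look like:

    # Hypothetical operations entry; all values are illustrative.
    operations:
    - opconditions:
      - conditiontype: 14
        operator: 0
        value: 0
      opmessage:
        default_msg: 1
      opmessage_grp:
      - usrgrpid: 7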
diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py
index e26c9caf3..560749f07 100644
--- a/roles/lib_zabbix/library/zbx_host.py
+++ b/roles/lib_zabbix/library/zbx_host.py
@@ -63,6 +63,19 @@ def get_template_ids(zapi, template_names):
template_ids.append({'templateid': content['result'][0]['templateid']})
return template_ids
+def interfaces_equal(zbx_interfaces, user_interfaces):
+    '''
+    compare interfaces from zabbix and interfaces from user
+    '''
+
+    for u_int in user_interfaces:
+        for z_int in zbx_interfaces:
+            for u_key, u_val in u_int.items():
+                if str(z_int[u_key]) != str(u_val):
+                    return False
+
+    return True
+
def main():
'''
Ansible module for zabbix host
@@ -120,8 +133,9 @@ def main():
'dns': '', # dns for host
'port': '10050', # port for interface? 10050
}]
+ hostgroup_names = list(set(module.params['hostgroup_names']))
params = {'host': hname,
- 'groups': get_group_ids(zapi, module.params['hostgroup_names']),
+ 'groups': get_group_ids(zapi, hostgroup_names),
'templates': get_template_ids(zapi, module.params['template_names']),
'interfaces': ifs,
}
@@ -140,6 +154,11 @@ def main():
if zab_results['parentTemplates'] != value:
differences[key] = value
+
+ elif key == "interfaces":
+ if not interfaces_equal(zab_results[key], value):
+ differences[key] = value
+
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
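
For reference, a hypothetical interfaces list handed to zbx_host (which
interfaces_equal compares field-by-field against what Zabbix reports) could
look like:

    # Hypothetical host interface; field names follow the Zabbix
    # host.interface object, values are illustrative.
    interfaces:
    - type: 1
      main: 1
      useip: 1
      ip: 192.168.0.10
      dns: ''
      port: '10050'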
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 47749389e..61344357a 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -57,6 +57,7 @@
expression: "{{ item.expression }}"
priority: "{{ item.priority }}"
url: "{{ item.url | default(None, True) }}"
+ status: "{{ item.status | default('', True) }}"
with_items: template.ztriggers
when: template.ztriggers is defined
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 0a0cf1fae..e2c51a903 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,6 +1,7 @@
---
- name: nickhammond.logrotate | Install logrotate
action: "{{ ansible_pkg_mgr }} name=logrotate state=present"
+ when: not openshift.common.is_atomic | bool
- name: nickhammond.logrotate | Setup logrotate.d scripts
template:
diff --git a/roles/nuage_master/README.md b/roles/nuage_master/README.md
new file mode 100644
index 000000000..de101dd19
--- /dev/null
+++ b/roles/nuage_master/README.md
@@ -0,0 +1,8 @@
+Nuage Master
+============
+Set up the Nuage Kubernetes Monitor on the master node
+
+
+Requirements
+------------
+This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh
new file mode 100644
index 000000000..f6fdb8a8d
--- /dev/null
+++ b/roles/nuage_master/files/serviceaccount.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Parse CLI options
+for i in "$@"; do
+    case $i in
+        --master-cert-dir=*)
+            MASTER_DIR="${i#*=}"
+            CA_CERT=${MASTER_DIR}/ca.crt
+            CA_KEY=${MASTER_DIR}/ca.key
+            CA_SERIAL=${MASTER_DIR}/ca.serial.txt
+            ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig
+            ;;
+        --server=*)
+            SERVER="${i#*=}"
+            ;;
+        --output-cert-dir=*)
+            OUTDIR="${i#*=}"
+            CONFIG_FILE=${OUTDIR}/nuage.kubeconfig
+            ;;
+    esac
+done
+
+# If any are missing, print the usage and exit
+if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then
+    echo "Invalid syntax: $@"
+    echo "Usage:"
+    echo "  $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/"
+    echo "--master-cert-dir: Directory where the master's configuration is held"
+    echo "--server: Address of Kubernetes API server (default port is 8443)"
+    echo "--output-cert-dir: Directory to put artifacts in"
+    echo ""
+    echo "All options are required"
+    exit 1
+fi
+
+# Login as admin so that we can create the service account
+oc login -u system:admin --config=$ADMIN_FILE || exit 1
+oc project default --config=$ADMIN_FILE
+
+ACCOUNT_CONFIG='
+{
+  "apiVersion": "v1",
+  "kind": "ServiceAccount",
+  "metadata": {
+    "name": "nuage"
+  }
+}
+'
+
+# Create the account with the included info
+echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f -
+
+# Add the cluster-reader role, which allows this service account read access to
+# everything in the cluster except secrets
+oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE
+
+# Generate certificates and a kubeconfig for the service account
+oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage'
+
+# Verify the finalized kubeconfig
+if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then
+    echo "Service account creation failed!"
+    exit 1
+fi
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
new file mode 100644
index 000000000..635d8a419
--- /dev/null
+++ b/roles/nuage_master/handlers/main.yaml
@@ -0,0 +1,18 @@
+---
+- name: restart nuagekubemon
+  sudo: true
+  service: name=nuagekubemon state=restarted
+
+- name: restart master
+  service: name={{ openshift.common.service_type }}-master state=restarted
+  when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+  service: name={{ openshift.common.service_type }}-master-api state=restarted
+  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+  service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+  ignore_errors: yes
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
new file mode 100644
index 000000000..a7baadc76
--- /dev/null
+++ b/roles/nuage_master/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: Create directory /usr/share/nuagekubemon
+  sudo: true
+  file: path=/usr/share/nuagekubemon state=directory
+
+- name: Create the log directory
+  sudo: true
+  file: path={{ nuagekubemon_log_dir }} state=directory
+
+- name: Install Nuage Kubemon
+  sudo: true
+  yum: name={{ nuage_kubemon_rpm }} state=present
+
+- name: Run the service account creation script
+  sudo: true
+  script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }}
+
+- name: Download the certs and keys
+  sudo: true
+  fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
+  with_items:
+  - ca.crt
+  - nuage.crt
+  - nuage.key
+  - nuage.kubeconfig
+
+- name: Create nuagekubemon.yaml
+  sudo: true
+  template: src=nuagekubemon.j2 dest=/usr/share/nuagekubemon/nuagekubemon.yaml owner=root mode=0644
+  notify:
+  - restart master
+  - restart master api
+  - restart master controllers
+  - restart nuagekubemon
diff --git a/roles/nuage_master/templates/nuagekubemon.j2 b/roles/nuage_master/templates/nuagekubemon.j2
new file mode 100644
index 000000000..fb586bcee
--- /dev/null
+++ b/roles/nuage_master/templates/nuagekubemon.j2
@@ -0,0 +1,19 @@
+# .kubeconfig that includes the nuage service account
+kubeConfig: {{ kube_config }}
+# name of the nuage service account, or another account with 'cluster-reader'
+# permissions
+# Openshift master config file
+openshiftMasterConfig: {{ master_config_yaml }}
+# URL of the VSD Architect
+vsdApiUrl: {{ vsd_api_url }}
+# API version to query against. Usually "v3_2"
+vspVersion: {{ vsp_version }}
+# File containing a VSP license to install. Only necessary if no license has
+# been installed on the VSD Architect before, only valid for standalone vsd install
+# licenseFile: "/path/to/base_vsp_license.txt"
+# Name of the enterprise in which pods will reside
+enterpriseName: {{ enterprise }}
+# Name of the domain in which pods will reside
+domainName: {{ domain }}
+# Location where logs should be saved
+log_dir: {{ nuagekubemon_log_dir }}
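
For reference, a rendered nuagekubemon.yaml under hypothetical variable values
(the VSD URL, enterprise, domain, and log directory are illustrative) might
read:

    kubeConfig: /usr/share/nuagekubemon/nuage.kubeconfig
    openshiftMasterConfig: /etc/origin/master/master-config.yaml
    vsdApiUrl: https://vsd.example.com:8443
    vspVersion: v3_2
    enterpriseName: kubernetes
    domainName: kubernetes
    log_dir: /var/log/nuagekubemon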
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
new file mode 100644
index 000000000..db901fea6
--- /dev/null
+++ b/roles/nuage_master/vars/main.yaml
@@ -0,0 +1,7 @@
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+cert_output_dir: /usr/share/nuagekubemon
+kube_config: /usr/share/nuagekubemon/nuage.kubeconfig
+kubemon_yaml: /usr/share/nuagekubemon/nuagekubemon.yaml
+master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
diff --git a/roles/nuage_node/README.md b/roles/nuage_node/README.md
new file mode 100644
index 000000000..02a3cbc77
--- /dev/null
+++ b/roles/nuage_node/README.md
@@ -0,0 +1,9 @@
+Nuage Node
+==========
+
+Set up Nuage VRS (Virtual Routing and Switching) on the OpenShift node
+
+Requirements
+------------
+
+This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
new file mode 100644
index 000000000..25482a845
--- /dev/null
+++ b/roles/nuage_node/handlers/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: restart vrs
+  sudo: true
+  service: name=openvswitch state=restarted
+
+- name: restart node
+  sudo: true
+  service: name={{ openshift.common.service_type }}-node state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
new file mode 100644
index 000000000..e0117bf71
--- /dev/null
+++ b/roles/nuage_node/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- name: Install Nuage VRS
+  sudo: true
+  yum: name={{ vrs_rpm }} state=present
+
+- name: Set the uplink interface
+  sudo: true
+  lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
+
+- name: Set the Active Controller
+  sudo: true
+  lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
+
+- name: Set the Standby Controller
+  sudo: true
+  lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
+  when: vsc_standby_ip is defined
+
+- name: Install plugin rpm
+  sudo: true
+  yum: name={{ plugin_rpm }} state=present
+
+- name: Copy the certificates and keys
+  sudo: true
+  copy: src="/tmp/{{ item }}" dest="{{ vsp_k8s_dir }}/{{ item }}"
+  with_items:
+  - ca.crt
+  - nuage.crt
+  - nuage.key
+  - nuage.kubeconfig
+
+- name: Set the vsp-k8s.yaml
+  sudo: true
+  template: src=vsp-k8s.j2 dest={{ vsp_k8s_yaml }} owner=root mode=0644
+  notify:
+  - restart vrs
+  - restart node
diff --git a/roles/nuage_node/templates/vsp-k8s.j2 b/roles/nuage_node/templates/vsp-k8s.j2
new file mode 100644
index 000000000..98d6c3a9c
--- /dev/null
+++ b/roles/nuage_node/templates/vsp-k8s.j2
@@ -0,0 +1,14 @@
+clientCert: {{ client_cert }}
+# The key to the certificate in clientCert above
+clientKey: {{ client_key }}
+# The certificate authority's certificate for the local kubelet. Usually the
+# same as the CA cert used to create the client Cert/Key pair.
+CACert: {{ ca_cert }}
+# Name of the enterprise in which pods will reside
+enterpriseName: {{ enterprise }}
+# Name of the domain in which pods will reside
+domainName: {{ domain }}
+# IP address and port number of master API server
+masterApiServer: {{ api_server }}
+# Bridge name for the docker bridge
+dockerBridgeName: {{ docker_bridge }}
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
new file mode 100644
index 000000000..a6b7cf997
--- /dev/null
+++ b/roles/nuage_node/vars/main.yaml
@@ -0,0 +1,9 @@
+---
+vrs_config: /etc/default/openvswitch
+vsp_k8s_dir: /usr/share/vsp-k8s
+vsp_k8s_yaml: "{{ vsp_k8s_dir }}/vsp-k8s.yaml"
+client_cert: "{{ vsp_k8s_dir }}/nuage.crt"
+client_key: "{{ vsp_k8s_dir }}/nuage.key"
+ca_cert: "{{ vsp_k8s_dir }}/ca.crt"
+api_server: "{{ openshift_node_master_api_url }}"
+docker_bridge: "docker0"
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 8d7686ffd..a6b6b1925 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -3,32 +3,17 @@
role: common
local_facts:
deployment_type: "{{ openshift_deployment_type }}"
+ cli_image: "{{ osm_image | default(None) }}"
- name: Install clients
- yum: pkg={{ openshift.common.service_type }}-clients state=installed
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-clients state=present"
when: not openshift.common.is_containerized | bool
-- name: List Docker images
- command: >
- docker images
- register: docker_images
-
- name: Pull CLI Image
command: >
docker pull {{ openshift.common.cli_image }}
- when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
-
-- name: Wait for CLI image
- command: >
- docker images
- register: docker_images
- until: openshift.common.cli_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
-
- name: Create /usr/local/bin/openshift cli wrapper
template:
src: openshift.j2
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 0ee873a2b..ff8c3b50f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -4,6 +4,14 @@
when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
- fail:
+ msg: Nuage sdn can not be used with openshift sdn
+ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
+ msg: Nuage sdn can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
msg: openshift_hostname must be 64 characters or less
when: openshift_hostname is defined and openshift_hostname | length > 64
@@ -23,7 +31,9 @@
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
+ use_nuage: "{{ openshift_use_nuage | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
+ data_dir: "{{ openshift_data_dir | default(None) }}"
- name: Install the base package for versioning
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present"
@@ -38,5 +48,6 @@
set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
- name: Set hostname
- hostname: name={{ openshift.common.hostname }}
+ command: >
+ hostnamectl set-hostname {{ openshift.common.hostname }}
when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 50816d319..b163f8aae 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,3 +5,4 @@
# chains with the public zone (or the zone associated with the correct
# interfaces)
os_firewall_use_firewalld: False
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2a3d4acbd..40e54d706 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -188,9 +188,6 @@ def normalize_gce_facts(metadata, facts):
_, _, zone = metadata['instance']['zone'].rpartition('/')
facts['zone'] = zone
- # Default to no sdn for GCE deployments
- facts['use_openshift_sdn'] = False
-
# GCE currently only supports a single interface
facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
@@ -341,6 +338,23 @@ def set_flannel_facts_if_unset(facts):
facts['common']['use_flannel'] = use_flannel
return facts
+def set_nuage_facts_if_unset(facts):
+    """ Set nuage facts if not already present in facts dict
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the nuage facts
+                  if they were not already present
+    """
+    if 'common' in facts:
+        if 'use_nuage' not in facts['common']:
+            use_nuage = False
+            facts['common']['use_nuage'] = use_nuage
+    return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -461,52 +475,68 @@ def set_url_facts_if_unset(facts):
were not already present
"""
if 'master' in facts:
- api_use_ssl = facts['master']['api_use_ssl']
- api_port = facts['master']['api_port']
- console_use_ssl = facts['master']['console_use_ssl']
- console_port = facts['master']['console_port']
- console_path = facts['master']['console_path']
- etcd_use_ssl = facts['master']['etcd_use_ssl']
- etcd_hosts = facts['master']['etcd_hosts']
- etcd_port = facts['master']['etcd_port']
hostname = facts['common']['hostname']
- public_hostname = facts['common']['public_hostname']
cluster_hostname = facts['master'].get('cluster_hostname')
cluster_public_hostname = facts['master'].get('cluster_public_hostname')
+ public_hostname = facts['common']['public_hostname']
+ api_hostname = cluster_hostname if cluster_hostname else hostname
+ api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
+ console_path = facts['master']['console_path']
+ etcd_hosts = facts['master']['etcd_hosts']
+
+ use_ssl = dict(
+ api=facts['master']['api_use_ssl'],
+ public_api=facts['master']['api_use_ssl'],
+ loopback_api=facts['master']['api_use_ssl'],
+ console=facts['master']['console_use_ssl'],
+ public_console=facts['master']['console_use_ssl'],
+ etcd=facts['master']['etcd_use_ssl']
+ )
+
+ ports = dict(
+ api=facts['master']['api_port'],
+ public_api=facts['master']['api_port'],
+ loopback_api=facts['master']['api_port'],
+ console=facts['master']['console_port'],
+ public_console=facts['master']['console_port'],
+ etcd=facts['master']['etcd_port'],
+ )
+
+ etcd_urls = []
+ if etcd_hosts != '':
+ facts['master']['etcd_port'] = ports['etcd']
+ facts['master']['embedded_etcd'] = False
+ for host in etcd_hosts:
+ etcd_urls.append(format_url(use_ssl['etcd'], host,
+ ports['etcd']))
+ else:
+ etcd_urls = [format_url(use_ssl['etcd'], hostname,
+ ports['etcd'])]
+
+ facts['master'].setdefault('etcd_urls', etcd_urls)
+
+ prefix_hosts = [('api', api_hostname),
+ ('public_api', api_public_hostname),
+ ('loopback_api', hostname)]
+
+ for prefix, host in prefix_hosts:
+ facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
+ host,
+ ports[prefix]))
+
+
+ r_lhn = "{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
+ facts['master'].setdefault('loopback_cluster_name', r_lhn)
+ facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
+ facts['master'].setdefault('loopback_user', "system:openshift-master/{0}".format(r_lhn))
+
+ prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
+ for prefix, host in prefix_hosts:
+ facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
+ host,
+ ports[prefix],
+ console_path))
- if 'etcd_urls' not in facts['master']:
- etcd_urls = []
- if etcd_hosts != '':
- facts['master']['etcd_port'] = etcd_port
- facts['master']['embedded_etcd'] = False
- for host in etcd_hosts:
- etcd_urls.append(format_url(etcd_use_ssl, host,
- etcd_port))
- else:
- etcd_urls = [format_url(etcd_use_ssl, hostname,
- etcd_port)]
- facts['master']['etcd_urls'] = etcd_urls
- if 'api_url' not in facts['master']:
- api_hostname = cluster_hostname if cluster_hostname else hostname
- facts['master']['api_url'] = format_url(api_use_ssl, api_hostname,
- api_port)
- if 'public_api_url' not in facts['master']:
- api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
- facts['master']['public_api_url'] = format_url(api_use_ssl,
- api_public_hostname,
- api_port)
- if 'console_url' not in facts['master']:
- console_hostname = cluster_hostname if cluster_hostname else hostname
- facts['master']['console_url'] = format_url(console_use_ssl,
- console_hostname,
- console_port,
- console_path)
- if 'public_console_url' not in facts['master']:
- console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
- facts['master']['public_console_url'] = format_url(console_use_ssl,
- console_public_hostname,
- console_port,
- console_path)
return facts
def set_aggregate_facts(facts):
@@ -884,10 +914,6 @@ def apply_provider_facts(facts, provider_facts):
if not provider_facts:
return facts
- use_openshift_sdn = provider_facts.get('use_openshift_sdn')
- if isinstance(use_openshift_sdn, bool):
- facts['common']['use_openshift_sdn'] = use_openshift_sdn
-
common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
for h_var, ip_var in common_vars:
ip_value = provider_facts['network'].get(ip_var)
@@ -1038,6 +1064,10 @@ def set_container_facts_if_unset(facts):
if 'ovs_image' not in facts['node']:
facts['node']['ovs_image'] = ovs_image
+ if facts['common']['is_containerized']:
+ facts['common']['admin_binary'] = '/usr/local/bin/oadm'
+ facts['common']['client_binary'] = '/usr/local/bin/oc'
+
return facts
@@ -1078,7 +1108,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd', 'nfs']
+ known_roles = ['common', 'master', 'node', 'etcd', 'nfs']
def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
self.changed = False
@@ -1115,6 +1145,7 @@ class OpenShiftFacts(object):
facts = set_project_cfg_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
facts = set_flannel_facts_if_unset(facts)
+ facts = set_nuage_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
@@ -1156,7 +1187,7 @@ class OpenShiftFacts(object):
defaults['common'] = common
if 'master' in roles:
- master = dict(api_use_ssl=True, api_port='8443',
+ master = dict(api_use_ssl=True, api_port='8443', controllers_port='8444',
console_use_ssl=True, console_path='/console',
console_port='8443', etcd_use_ssl=True, etcd_hosts='',
etcd_port='4001', portal_net='172.30.0.0/16',
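
As an illustration of the consolidated URL logic, a master with the defaults
above and a hypothetical hostname master.example.com would derive facts along
these lines:

    # Illustrative derived facts (hostname is hypothetical):
    openshift.master.api_url: https://master.example.com:8443
    openshift.master.public_api_url: https://master.example.com:8443
    openshift.master.loopback_api_url: https://master.example.com:8443
    openshift.master.console_url: https://master.example.com:8443/console
    openshift.master.loopback_cluster_name: master-example-com:8443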
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 87fa99a3b..0dbac1b54 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,15 +1,14 @@
---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
- assert:
- that:
- - ansible_version | version_compare('1.8.0', 'ge')
- - ansible_version | version_compare('1.9.0', 'ne')
- - ansible_version | version_compare('1.9.0.1', 'ne')
-
+- name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version }} found"
+ when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+
- name: Detecting Operating System
shell: ls /run/ostree-booted
ignore_errors: yes
failed_when: false
+ changed_when: false
register: ostree_output
# Locally setup containerized facts for now
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 9766d01ae..1f74d851a 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -6,7 +6,9 @@ os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
- service: api server https
- port: 8443/tcp
+ port: "{{ openshift.master.api_port }}/tcp"
+- service: api controllers https
+ port: "{{ openshift.master.controllers_port }}/tcp"
- service: dns tcp
port: 53/tcp
- service: dns udp
@@ -24,7 +26,5 @@ os_firewall_allow:
os_firewall_deny:
- service: api server http
port: 8080/tcp
-- service: former web console port
- port: 8444/tcp
- service: former etcd peer port
port: 7001/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e1b95eda4..6b9992eea 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -2,11 +2,24 @@
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
- name: restart master api
service: name={{ openshift.common.service_type }}-master-api state=restarted
when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
- name: restart master controllers
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl -k --head --silent {{ openshift.master.api_url }}
+  register: api_available_output
+  until: api_available_output.stdout.find("200 OK") != -1
+  retries: 120
+  delay: 1
+  changed_when: false
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 397122631..57b50bee4 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -42,7 +42,12 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
logging_public_url: "{{ openshift_master_logging_public_url | default(None) }}"
metrics_public_url: "{{ openshift_master_metrics_public_url | default(None) }}"
- etcd_hosts: "{{ openshift_master_etcd_hosts | default(None)}}"
+ logout_url: "{{ openshift_master_logout_url | default(None) }}"
+ extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}"
+ extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}"
+ extensions: "{{ openshift_master_extensions | default(None) }}"
+ oauth_template: "{{ openshift_master_oauth_template | default(None) }}"
+ etcd_hosts: "{{ openshift_master_etcd_hosts | default(None) }}"
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
@@ -51,6 +56,7 @@
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
dns_port: "{{ openshift_master_dns_port | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
+ pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
@@ -86,36 +92,20 @@
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present"
when: not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull master image
command: >
docker pull {{ openshift.master.master_image }}
- when: openshift.common.is_containerized | bool and openshift.master.master_image not in docker_images.stdout
-
-- name: Wait for master image
- command: >
- docker images
- register: docker_images
- until: openshift.master.master_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
- name: Install Master docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
- src: master.docker.service.j2
+ src: docker/master.docker.service.j2
register: install_result
when: openshift.common.is_containerized | bool and not openshift_master_ha | bool
-
+
- name: Create openshift.common.data_dir
- file:
+ file:
path: "{{ openshift.common.data_dir }}"
state: directory
mode: 0755
@@ -190,31 +180,42 @@
when: openshift.common.is_containerized | bool
# workaround for missing systemd unit files for controllers/api
-- name: Create the api service file
+- name: Create the systemd unit files
template:
- src: atomic-openshift-master-api{{ ha_suffix }}.service.j2
- dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-api.service"
+ src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
+ dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- name: Create the controllers service file
- template:
- src: atomic-openshift-master-controllers{{ ha_suffix }}.service.j2
- dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-controllers.service"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- name: Create the api env file
+ with_items:
+ - api
+ - controllers
+ register: create_unit_files
+
+- command: systemctl daemon-reload
+ when: create_unit_files | changed
+# end workaround for missing systemd unit files
+
+- name: Create the master api service env file
template:
- src: atomic-openshift-master-api.j2
+ src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
- force: no
when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- name: Create the controllers env file
+ notify:
+ - restart master api
+
+- name: Create the master controllers service env file
template:
- src: atomic-openshift-master-controllers.j2
+ src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
- force: no
when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- command: systemctl daemon-reload
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-# end workaround for missing systemd unit files
+ notify:
+ - restart master controllers
+
+- name: Create the master service env file
+ template:
+ src: "atomic-openshift-master.j2"
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
+ notify:
+ - restart master
- name: Create session secrets file
template:
@@ -239,52 +240,42 @@
- restart master api
- restart master controllers
-- name: Configure master settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: yes
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift_master_config_file }}"
- notify:
- - restart master
-
-- name: Configure master api settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift_master_config_file }}"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
- notify:
- - restart master api
-
-- name: Configure master controller settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift_master_config_file }}"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
- notify:
- - restart master controllers
+- name: Test local loopback context
+ command: >
+ {{ openshift.common.client_binary }} config view
+ --config={{ openshift_master_loopback_config }}
+ changed_when: false
+ register: loopback_config
+
+- command: >
+ {{ openshift.common.client_binary }} config set-cluster
+ --certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ --embed-certs=true --server={{ openshift.master.loopback_api_url }}
+ {{ openshift.master.loopback_cluster_name }}
+ --config={{ openshift_master_loopback_config }}
+ when: loopback_context_string not in loopback_config.stdout
+ register: set_loopback_cluster
+
+- command: >
+ {{ openshift.common.client_binary }} config set-context
+ --cluster={{ openshift.master.loopback_cluster_name }}
+ --namespace=default --user={{ openshift.master.loopback_user }}
+ {{ openshift.master.loopback_context_name }}
+ --config={{ openshift_master_loopback_config }}
+ when: set_loopback_cluster | changed
+ register: set_loopback_context
+
+- command: >
+ {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+ --config={{ openshift_master_loopback_config }}
+ when: set_loopback_context | changed
+ register: set_current_context
- name: Start and enable master
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
register: start_result
+ notify: Verify API Server
- name: Stop and disable non HA master when running HA
service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
@@ -303,6 +294,20 @@
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+# A separate wait is required here for native HA since notifies will
+# be resolved after all tasks in the role.
+- name: Wait for API to become available
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl -k --head --silent {{ openshift.master.api_url }}
+  register: api_available_output
+  until: api_available_output.stdout.find("200 OK") != -1
+  retries: 120
+  delay: 1
+  changed_when: false
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
+
- name: Start and enable master controller
service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 205934248..81bae5470 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,5 +1,5 @@
-OPTIONS=
-CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+OPTIONS=--loglevel={{ openshift.master.debug_level }}
+CONFIG_FILE={{ openshift_master_config_file }}
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2
new file mode 120000
index 000000000..4bb7095ee
--- /dev/null
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2
@@ -0,0 +1 @@
+../native-cluster/atomic-openshift-master-api.j2
\ No newline at end of file
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 936c39edf..a935b82f6 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -23,4 +23,4 @@ Restart=always
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service
\ No newline at end of file
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2
new file mode 120000
index 000000000..8714ebbae
--- /dev/null
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2
@@ -0,0 +1 @@
+../native-cluster/atomic-openshift-master-controllers.j2
\ No newline at end of file
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 6ba7d6e2a..6ba7d6e2a 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
diff --git a/roles/openshift_master/templates/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2
index 23781a313..23781a313 100644
--- a/roles/openshift_master/templates/master.docker.service.j2
+++ b/roles/openshift_master/templates/docker/master.docker.service.j2
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 647476b7f..1eeab46fe 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -5,7 +5,7 @@ apiLevels:
- v1
apiVersion: v1
assetConfig:
- logoutURL: ""
+ logoutURL: "{{ openshift.master.logout_url | default('') }}"
masterPublicURL: {{ openshift.master.public_api_url }}
publicURL: {{ openshift.master.public_console_url }}/
{% if 'logging_public_url' in openshift.master %}
@@ -14,6 +14,15 @@ assetConfig:
{% if 'metrics_public_url' in openshift.master %}
metricsPublicURL: {{ openshift.master.metrics_public_url }}
{% endif %}
+{% if 'extension_scripts' in openshift.master %}
+ extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }}
+{% endif %}
+{% if 'extension_stylesheets' in openshift.master %}
+ extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }}
+{% endif %}
+{% if 'extensions' in openshift.master %}
+ extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }}
+{% endif %}
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
bindNetwork: tcp4
@@ -87,11 +96,11 @@ kubernetesMasterConfig:
- v1beta3
- v1
{% endif %}
- apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_json }}
- controllerArguments: {{ openshift.master.controller_args | default(None) | to_json }}
+ apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+ controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
- podEvictionTimeout: ""
+ podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
certFile: master.proxy-client.crt
keyFile: master.proxy-client.key
@@ -108,12 +117,16 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
oauthConfig:
+{% if 'oauth_template' in openshift.master %}
+ templates:
+ login: {{ openshift.master.oauth_template }}
+{% endif %}
assetPublicURL: {{ openshift.master.public_console_url }}/
grantConfig:
method: {{ openshift.master.oauth_grant_method }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
new file mode 100644
index 000000000..48bfa5f04
--- /dev/null
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -0,0 +1,9 @@
+OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+CONFIG_FILE={{ openshift_master_config_file }}
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index ba19fb348..ba19fb348 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 205934248..cdc56eece 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,5 +1,5 @@
-OPTIONS=
-CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+CONFIG_FILE={{ openshift_master_config_file }}
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index e6e97b24f..e6e97b24f 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 534465451..fe88c3c16 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,11 +1,16 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
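+# Searched for in the `config view` output of the loopback kubeconfig; when it
+# is absent the loopback cluster and context are recreated.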
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
openshift_version: "{{ openshift_pkg_version | default('') }}"
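+# Containerized installs use the docker-cluster unit templates and place units
+# under /etc/systemd/system; RPM installs use the native-cluster templates
+# under /usr/lib/systemd/system.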
+ha_svc_template_path: "{{ 'docker-cluster' if openshift.common.is_containerized | bool else 'native-cluster' }}"
+ha_svc_svc_dir: "{{ '/etc/systemd/system' if openshift.common.is_containerized | bool else '/usr/lib/systemd/system' }}"
+
openshift_master_valid_grant_methods:
- auto
- prompt
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 5b4c92f2b..6d9be81c0 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -13,16 +13,10 @@
path: "{{ openshift_master_config_dir }}"
state: directory
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
-- name: Pull required docker image
+- name: Pull master docker image
command: >
docker pull {{ openshift.common.cli_image }}
- when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+ when: openshift.common.is_containerized | bool
- name: Create the master certificates if they do not already exist
command: >
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
index 7ab9afb51..1b94598dd 100644
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ b/roles/openshift_master_cluster/tasks/configure.yml
@@ -34,11 +34,10 @@
- name: Disable stonith
command: pcs property set stonith-enabled=false
-# TODO: handle case where api port is not 8443
- name: Wait for the clustered master service to be available
wait_for:
host: "{{ openshift_master_cluster_vip }}"
- port: 8443
+ port: "{{ openshift.master.api_port }}"
state: started
timeout: 180
delay: 90
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 33852d7f8..9035248f9 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,41 +44,14 @@
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull node image
command: >
docker pull {{ openshift.node.node_image }}
- when: openshift.common.is_containerized | bool and openshift.node.node_image not in docker_images.stdout
-
-- name: Wait for node image
- command: >
- docker images
- register: docker_images
- until: openshift.node.node_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
-
+
- name: Pull OpenVSwitch image
command: >
docker pull {{ openshift.node.ovs_image }}
- when: openshift.common.is_containerized | bool and openshift.node.ovs_image not in docker_images.stdout
- and openshift.common.use_openshift_sdn | bool
-
-- name: Wait for OpenVSwitch image
- command: >
- docker images
- register: docker_images
- until: openshift.node.ovs_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
- name: Install Node docker service file
@@ -130,6 +103,21 @@
- name: Additional storage plugin configuration
include: storage_plugins/main.yml
+# Necessary because, on a node that is also a master, restarting docker on the
+# node also restarts the master, and it can take up to 60 seconds for systemd
+# to start the master again.
+- name: Wait for master API to become available before proceeding
+  # Using curl here since the uri module requires python-httplib2 and a
+  # wait_for port check provides no health information.
+ command: >
+ curl -k --head --silent {{ openshift_node_master_api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
+
- name: Start and enable node
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: start_result
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 1edf21d9b..14a613786 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,4 +1,8 @@
---
+- name: Install NFS storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+ when: not openshift.common.is_atomic | bool
+
- name: Set seboolean to allow nfs storage plugin access from containers
seboolean:
name: virt_use_nfs
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 23bd81f91..44065f4bd 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -11,9 +11,7 @@ imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
kind: NodeConfig
-{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}
-kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
-{% endif %}
+kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
@@ -22,7 +20,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index bcf1d9a34..e3176e611 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -1,2 +1,3 @@
---
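+# Set to False to skip all firewall configuration (both firewalld and iptables).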
+os_firewall_enabled: True
os_firewall_use_firewalld: True
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index ad89ef97c..076e5e311 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -1,6 +1,6 @@
---
- include: firewall/firewalld.yml
- when: os_firewall_use_firewalld
+ when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool
- include: firewall/iptables.yml
- when: not os_firewall_use_firewalld
+ when: os_firewall_enabled | bool and not os_firewall_use_firewalld | bool
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index a8b65dd56..1c8d88854 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: "Zabbix config is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
-
- name: Main List all templates
zbx_template:
zbx_server: "{{ ozb_server }}"
@@ -45,6 +41,10 @@
tags:
- zagg_server
+- include_vars: template_config_loop.yml
+ tags:
+ - config_loop
+
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
vars:
@@ -154,3 +154,13 @@
password: "{{ ozb_password }}"
tags:
- zagg_server
+
+- name: Include Template Config Loop
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_config_loop }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+ tags:
+ - config_loop
diff --git a/roles/os_zabbix/vars/template_config_loop.yml b/roles/os_zabbix/vars/template_config_loop.yml
new file mode 100644
index 000000000..823da1868
--- /dev/null
+++ b/roles/os_zabbix/vars/template_config_loop.yml
@@ -0,0 +1,14 @@
+---
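+# Zabbix template for the config loop check; included from
+# os_zabbix/tasks/main.yml under the config_loop tag.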
+g_template_config_loop:
+ name: Template Config Loop
+ zitems:
+ - key: config_loop.run.exit_code
+ applications:
+ - Config Loop
+ value_type: int
+
+ ztriggers:
+ - name: 'config_loop.run.exit_code not zero on {HOST.NAME}'
+ expression: '{Template Config Loop:config_loop.run.exit_code.min(#2)}>0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_config_loop.asciidoc'
+ priority: average
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
index a05e552e3..dd13e76f7 100644
--- a/roles/os_zabbix/vars/template_docker.yml
+++ b/roles/os_zabbix/vars/template_docker.yml
@@ -72,10 +72,12 @@ g_template_docker:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc'
priority: high
+ # Re-enable for OpenShift 3.1.1 (https://bugzilla.redhat.com/show_bug.cgi?id=1292971#c6)
- name: 'docker.container.dns.resolution failed on {HOST.NAME}'
expression: '{Template Docker:docker.container.dns.resolution.min(#3)}>0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc'
priority: average
+ status: disabled
- name: 'docker.container.existing.dns.resolution.failed on {HOST.NAME}'
expression: '{Template Docker:docker.container.existing.dns.resolution.failed.min(#3)}>0'
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index a0ba8d104..12ea36c8b 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -98,6 +98,18 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.skydns.port.open
+ description: State of the SkyDNS port open and listening
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.skydns.query
+ description: SkyDNS can be queried or not
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.etcd.create.success
description: Show number of successful create actions
type: int
@@ -305,6 +317,20 @@ g_template_openshift_master:
- 'Openshift Master process not running on {HOST.NAME}'
priority: high
+ - name: 'SkyDNS port not listening on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'SkyDNS query failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.skydns.query.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master API health check is failing on {HOST.NAME}'
+ priority: high
+
zgraphs:
- name: Openshift Master API Server Latency Pods LIST Quantiles
width: 900
diff --git a/roles/oso_host_monitoring/handlers/main.yml b/roles/oso_host_monitoring/handlers/main.yml
index 7863ad15b..3a5d8024c 100644
--- a/roles/oso_host_monitoring/handlers/main.yml
+++ b/roles/oso_host_monitoring/handlers/main.yml
@@ -4,9 +4,3 @@
name: "{{ osohm_host_monitoring }}"
state: restarted
enabled: yes
-
-- name: "Restart the {{ osohm_zagg_client }} service"
- service:
- name: "{{ osohm_zagg_client }}"
- state: restarted
- enabled: yes
diff --git a/roles/oso_host_monitoring/tasks/main.yml b/roles/oso_host_monitoring/tasks/main.yml
index 6ddfa3dcb..a0a453416 100644
--- a/roles/oso_host_monitoring/tasks/main.yml
+++ b/roles/oso_host_monitoring/tasks/main.yml
@@ -5,7 +5,6 @@
with_items:
- osohm_zagg_web_url
- osohm_host_monitoring
- - osohm_zagg_client
- osohm_docker_registry_url
- osohm_default_zagg_server_user
- osohm_default_zagg_server_password
@@ -37,29 +36,12 @@
- "Restart the {{ osohm_host_monitoring }} service"
register: systemd_host_monitoring
-- name: "Copy {{ osohm_zagg_client }} systemd file"
- template:
- src: "{{ osohm_zagg_client }}.service.j2"
- dest: "/etc/systemd/system/{{ osohm_zagg_client }}.service"
- owner: root
- group: root
- mode: 0644
- notify:
- - "Restart the {{ osohm_zagg_client }} service"
- register: zagg_systemd
-
- name: reload systemd
command: /usr/bin/systemctl --system daemon-reload
- when: systemd_host_monitoring | changed or zagg_systemd | changed
+ when: systemd_host_monitoring | changed
- name: "Start the {{ osohm_host_monitoring }} service"
service:
name: "{{ osohm_host_monitoring }}"
state: started
enabled: yes
-
-- name: "Start the {{ osohm_zagg_client }} service"
- service:
- name: "{{ osohm_zagg_client }}"
- state: started
- enabled: yes
diff --git a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
deleted file mode 100644
index d18ad90fe..000000000
--- a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is a systemd file to run this docker container under systemd.
-# To make this work:
-# * pull the image (probably from ops docker registry)
-# * place this file in /etc/systemd/system without the .systemd extension
-# * run the commands:
-# systemctl daemon-reload
-# systemctl enable pcp-docker
-# systemctl start pcp-docker
-#
-#
-[Unit]
-Description=PCP Collector Contatainer
-Requires=docker.service
-After=docker.service
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Environment=HOME=/etc/docker/ops
-#Slice=container-small.slice
-
-# systemd syntax '=-' ignore errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
-
-
-ExecStart=/usr/bin/docker run --rm --name="{{ osohm_host_monitoring }}" \
- --privileged --net=host --pid=host --ipc=host \
- -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \
- -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \
- -v /var/log:/var/log \
- {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-
-ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-Restart=always
-RestartSec=30
-
-[Install]
-WantedBy=default.target
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
index bcc8a5e03..ac950b4e5 100644
--- a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
@@ -4,12 +4,12 @@
# * place this file in /etc/systemd/system without the .systemd extension
# * run the commands:
# systemctl daemon-reload
-# systemctl enable zagg-client-docker
-# systemctl start zagg-client-docker
+# systemctl enable oso-rhel7-host-monitoring
+# systemctl start oso-rhel7-host-monitoring
#
#
[Unit]
-Description=Zagg Client Contatainer
+Description=Openshift Host Monitoring Container
Requires=docker.service
After=docker.service
@@ -21,40 +21,54 @@ Environment=HOME=/etc/docker/ops
#Slice=container-small.slice
# systemd syntax '=-' ignore errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_zagg_client }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_zagg_client }}"
+ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
+# mwoodson note 1-7-16:
+# pcp recommends mounting /run in their Dockerfile, but /run conflicts with
+# cron, which also runs in this container, so I am leaving /run out for now.
+# The folks in #pcp said they mount /run to share the pcp socket created in
+# /run; as far as I know we are not using that socket.
+# This problem goes away once the containers run systemd and use systemd
+# timers instead of cron.
+# -v /run:/run \
-ExecStart=/usr/bin/docker run --name {{ osohm_zagg_client }} \
+ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} \
--privileged \
--pid=host \
--net=host \
- -e ZAGG_URL={{ osohm_zagg_web_url }} \
- -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
- -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
+ --ipc=host \
+ -e ZAGG_URL={{ osohm_zagg_web_url }} \
+ -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
+ -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
-e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \
- -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
+ -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
-e OSO_CLUSTER_GROUP={{ cluster_group }} \
- -e OSO_CLUSTER_ID={{ oo_clusterid }} \
+ -e OSO_CLUSTER_ID={{ oo_clusterid }} \
+ -e OSO_ENVIRONMENT={{ oo_environment }} \
-e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \
-e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \
+ -e OSO_MASTER_HA={{ osohm_master_ha }} \
-v /etc/localtime:/etc/localtime \
- -v /run/pcp:/run/pcp \
+ -v /sys:/sys:ro \
+ -v /sys/fs/selinux \
+ -v /var/lib/docker:/var/lib/docker:ro \
-v /var/run/docker.sock:/var/run/docker.sock \
- -v /var/run/openvswitch:/var/run/openvswitch \
+ -v /var/run/openvswitch:/var/run/openvswitch \
{% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %}
-v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \
-v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \
-v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \
-v /etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \
{% endif %}
- {{ osohm_docker_registry_url }}{{ osohm_zagg_client }}
+ {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-ExecReload=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
+ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
+ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
Restart=always
RestartSec=30
diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md
new file mode 100644
index 000000000..4215f9eeb
--- /dev/null
+++ b/roles/oso_monitoring_tools/README.md
@@ -0,0 +1,54 @@
+oso_monitoring_tools
+====================
+
+This role installs the Openshift monitoring utilities.
+
+Requirements
+------------
+
+A yum repository that provides the openshift-tools monitoring packages listed
+in tasks/main.yml.
+
+Role Variables
+--------------
+
+osomt_zagg_client_config, assembled in vars/main.yml from the osomt_* variables
+shown below:
+
+osomt_zagg_client_config:
+ host:
+ name: "{{ osomt_host_name }}"
+ zagg:
+ url: "{{ osomt_zagg_url }}"
+ user: "{{ osomt_zagg_user }}"
+ pass: "{{ osomt_zagg_password }}"
+ ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+ verbose: "{{ osomt_zagg_verbose }}"
+ debug: "{{ osomt_zagg_debug }}"
+
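+Each osomt_* variable must be supplied by the calling play; defaults/main.yml
+defines none of them.
+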
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+- hosts: all
+  roles:
+    - role: "oso_monitoring_tools"
+      osomt_host_name: hostname
+      osomt_zagg_url: http://path.to/zagg_web
+      osomt_zagg_user: admin
+      osomt_zagg_password: password
+      osomt_zagg_ssl_verify: True
+      osomt_zagg_verbose: False
+      osomt_zagg_debug: False
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml
new file mode 100644
index 000000000..a17424f25
--- /dev/null
+++ b/roles/oso_monitoring_tools/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml
new file mode 100644
index 000000000..cefa780ab
--- /dev/null
+++ b/roles/oso_monitoring_tools/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml
new file mode 100644
index 000000000..9c42b68dc
--- /dev/null
+++ b/roles/oso_monitoring_tools/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: OpenShift Operations
+ description: Install Openshift Monitoring tools
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml
new file mode 100644
index 000000000..c90fc56e2
--- /dev/null
+++ b/roles/oso_monitoring_tools/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# tasks file for oso_monitoring_tools
+- name: Install the Openshift Tools RPMS
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - openshift-tools-scripts-monitoring-zagg-client
+ - python-openshift-tools-monitoring-zagg
+ - python-openshift-tools-monitoring-zabbix
+
+- debug: var=osomt_zagg_client_config
+
+- name: Generate the /etc/openshift_tools/zagg_client.yaml config file
+ copy:
+ content: "{{ osomt_zagg_client_config | to_nice_yaml }}"
+ dest: /etc/openshift_tools/zagg_client.yaml
+    mode: "0644"
diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml
new file mode 100644
index 000000000..3538ba30b
--- /dev/null
+++ b/roles/oso_monitoring_tools/vars/main.yml
@@ -0,0 +1,12 @@
+---
+# vars file for oso_monitoring_tools
+osomt_zagg_client_config:
+ host:
+ name: "{{ osomt_host_name }}"
+ zagg:
+ url: "{{ osomt_zagg_url }}"
+ user: "{{ osomt_zagg_user }}"
+ pass: "{{ osomt_zagg_password }}"
+ ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+ verbose: "{{ osomt_zagg_verbose }}"
+ debug: "{{ osomt_zagg_debug }}"
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index e9e6e4bd4..08540f440 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -2,8 +2,24 @@
- name: Disable all repositories
command: subscription-manager repos --disable="*"
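+# Pick the default OSE repo version from the deployment type, allow an
+# oo_option override, and fail fast on unsupported combinations.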
+- set_fact:
+ default_ose_version: '3.0'
+ when: deployment_type == 'enterprise'
+
+- set_fact:
+ default_ose_version: '3.1'
+ when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
+
+- set_fact:
+ ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}"
+
+- fail:
+ msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
+ when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1'] )
+
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-3.0-rpms"
+ --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index c160ea4e9..eecfd04a0 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,4 +41,4 @@
command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
- include: enterprise.yml
- when: deployment_type == 'enterprise'
+ when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]