32 files changed, 673 insertions, 92 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index d85882bf9..4ec54c846 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.32-1 ./ +3.0.35-1 ./ diff --git a/README_AWS.md b/README_AWS.md index f8ecaec49..c605de43d 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -51,7 +51,7 @@ to setup a private key file to allow ansible to connect to the created hosts. To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS. ``` Host *.compute-1.amazonaws.com - PrivateKey $HOME/.ssh/my_private_key.pem + IdentityFile $HOME/.ssh/my_private_key.pem ``` Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances. diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc index 09d4839c7..72eaedcf9 100644 --- a/docs/style_guide.adoc +++ b/docs/style_guide.adoc @@ -19,9 +19,10 @@ This style guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119]. * https://www.python.org/dev/peps/pep-0008/#maximum-line-length[Python Pep8 Line Length] ''' +[[All-lines-SHOULD-be-no-longer-than-80-characters]] [cols="2v,v"] |=== -| **Rule** +| <<All-lines-SHOULD-be-no-longer-than-80-characters, Rule>> | All lines SHOULD be no longer than 80 characters. |=== @@ -31,9 +32,10 @@ Code readability is subjective, therefore pull-requests SHOULD still be merged, ''' +[[All-lines-MUST-be-no-longer-than-120-characters]] [cols="2v,v"] |=== -| **Rule** +| <<All-lines-MUST-be-no-longer-than-120-characters, Rule>> | All lines MUST be no longer than 120 characters. |=== @@ -46,9 +48,10 @@ This is a hard limit and is enforced by the build bot. This check MUST NOT be di === Ansible Yaml file extension ''' +[[All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc]] [cols="2v,v"] |=== -| **Rule** +| <<All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc, Rule>> | All Ansible Yaml files MUST have a .yml extension (and NOT .YML, .yaml etc). |=== @@ -59,9 +62,10 @@ Example: `tasks.yml` === Ansible CLI Variables ''' +[[Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli]] [cols="2v,v"] |=== -| **Rule** +| <<Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli, Rule>> | Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_ |=== @@ -76,9 +80,10 @@ ansible-playbook -e cli_foo=bar someplays.yml === Ansible Global Variables ''' +[[Global-variables-MUST-have-a-prefix-of-g]] [cols="2v,v"] |=== -| **Rule** +| <<Global-variables-MUST-have-a-prefix-of-g, Rule>> | Global variables MUST have a prefix of g_ |=== Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc. @@ -94,9 +99,10 @@ g_environment: someval Ansible role variables are defined as variables contained in (or passed into) a role. ''' +[[Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules]] [cols="2v,v"] |=== -| **Rule** +| <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>> | Role variables MUST have a prefix of atleast 3 characters. See below for specific naming rules. 
|=== diff --git a/git/parent.py b/git/parent.py new file mode 100755 index 000000000..154a02350 --- /dev/null +++ b/git/parent.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +''' + Script to determine if this commit has also + been merged through the stage branch +''' +# +# Usage: +# parent_check.py <branch> <commit_id> +# +# +import sys +import subprocess + +def run_cli_cmd(cmd, in_stdout=None, in_stderr=None): + '''Run a command and return its output''' + if not in_stderr: + proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False) + else: + proc = subprocess.check_output(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False) + stdout, stderr = proc.communicate() + if proc.returncode != 0: + return {"rc": proc.returncode, "error": stderr} + else: + return {"rc": proc.returncode, "result": stdout} + +def main(): + '''Check to ensure that the commit that is currently + being submitted is also in the stage branch. + + if it is, succeed + else, fail + ''' + branch = 'prod' + + if sys.argv[1] != branch: + sys.exit(0) + + # git co stg + results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg']) + + # git pull latest + results = run_cli_cmd(['/usr/bin/git', 'pull']) + + # setup on the <prod> branch in git + results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod']) + + results = run_cli_cmd(['/usr/bin/git', 'pull']) + # merge the passed in commit into my current <branch> + + commit_id = sys.argv[2] + results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id]) + + # get the differences from stg and <branch> + results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod']) + + # exit here with error code if the result coming back is an error + if results['rc'] != 0: + print results['error'] + sys.exit(results['rc']) + + count = 0 + # Each 'result' is a commit + # Walk through each commit and see if it is in stg + for commit in results['result'].split('\n'): + + # continue if it is already in stg + if not commit or commit.startswith('<'): + continue + + # remove the first char '>' + commit = commit[1:] + + # check if any remote branches contain $commit + results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None) + + # if this comes back empty, nothing contains it, we can skip it as + # we have probably created the merge commit here locally + if results['rc'] == 0 and len(results['result']) == 0: + continue + + # The results generally contain origin/pr/246/merge and origin/pr/246/head + # this is the pull request which would contain the commit in question. + # + # If the results do not contain origin/stg then stage does not contain + # the commit in question. Therefore we need to alert! 
+ if 'origin/stg' not in results['result']: + print "\nFAILED: (These commits are not in stage.)\n" + print "\t%s" % commit + count += 1 + + # Exit with count of commits in #{branch} but not stg + sys.exit(count) + +if __name__ == '__main__': + main() + diff --git a/git/yaml_validate.py b/git/yaml_validate.py new file mode 100755 index 000000000..7e0a08a4b --- /dev/null +++ b/git/yaml_validate.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# +# python yaml validator for a git commit +# +''' +python yaml validator for a git commit +''' +import shutil +import sys +import os +import glob +import tempfile +import subprocess +import yaml + +def get_changes(oldrev, newrev, tempdir): + '''Get a list of git changes from oldrev to newrev''' + proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev, + newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE) + proc.wait() + files = proc.stdout.read().strip().split('\n') + + # No file changes + if not files: + return [] + + cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir) + proc = subprocess.Popen(cmd, shell=True) + proc.wait() + + return [fmod for fmod in glob.glob('%s/**/*' % tempdir) if not os.path.isdir(fmod)] + +def main(): + ''' + Perform yaml validation + ''' + results = [] + try: + tmpdir = tempfile.mkdtemp(prefix='jenkins-git-') + old, new, _ = sys.argv[1:] + + for file_mod in get_changes(old, new, tmpdir): + + print "+++++++ Received: %s" % file_mod + + if not file_mod.endswith('.yml') or not file_mod.endswith('.yaml'): + continue + + try: + yaml.load(file_mod) + results.append(True) + + except yaml.scanner.ScannerError as yerr: + print yerr.message + results.append(False) + finally: + shutil.rmtree(tmpdir) + + if not all(results): + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/openshift-ansible.spec b/openshift-ansible.spec index c8f6a2673..4d00c655b 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.32 +Version: 3.0.35 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,36 @@ Atomic OpenShift Utilities includes %changelog +* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.35-1 +- added the lib_timedate role (mwoodson@redhat.com) +- added chrony (mwoodson@redhat.com) +- added oso_moniotoring tools role (mwoodson@redhat.com) +- Improve pacemaker 'is-active' check. (abutcher@redhat.com) + +* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.34-1 +- clean up too-many-branches / logic (jdiaz@redhat.com) +- atomic-openshift-installer: add containerized to inventory + (smunilla@redhat.com) +- Add 'unknown' to possible output for the is-active check. + (abutcher@redhat.com) +- Fix cluster_method conditional in master restart playbook. + (abutcher@redhat.com) +- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com) +- atomic-openshift-installer: Remove containerized install for 3.0 + (smunilla@redhat.com) +- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com) +- Remove pause after haproxy start (abutcher@redhat.com) +- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com) + +* Fri Jan 15 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.33-1 +- Configure nodes which are also masters prior to nodes in containerized + install. (abutcher@redhat.com) +- Call attention to openshift_master_rolling_restart_mode variable in restart + prompt. 
(abutcher@redhat.com) +- Added anchors for rules in style_guide.adoc in order to make it easier to + reference specific rules in PRs. (twiest@redhat.com) +- Update ec2.ini (jdetiber@redhat.com) + * Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.32-1 - Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com) diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml index de9f36c8a..0df77e309 100644 --- a/playbooks/adhoc/bootstrap-fedora.yml +++ b/playbooks/adhoc/bootstrap-fedora.yml @@ -1,4 +1,4 @@ -- hosts: OSv3 +- hosts: OSEv3 gather_facts: false tasks: - name: install python and deps for ansible modules diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index 174cea460..d24e9cafa 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -20,7 +20,7 @@ # ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml # # Notes: -# * By default this will do a 55GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable +# * By default this will do a 200GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable # * This does a GP2 by default. Support for Provisioned IOPS has not been added # * This will assign the new volume to /dev/xvdc. This is not variablized, yet. # * This can be done with NO downtime on the host @@ -36,7 +36,7 @@ vars: cli_volume_type: gp2 - cli_volume_size: 55 + cli_volume_size: 200 # cli_volume_iops: "{{ 30 * cli_volume_size }}" pre_tasks: diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index fa13a64cb..052892863 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -57,8 +57,10 @@ Warning: Running playbook from a host that will be restarted! Press CTRL+C and A to abort playbook execution. You may continue by pressing ENTER but the playbook will stop - executing once this system restarts and services must be - manually verified. + executing after this system has been restarted and services + must be verified manually. To only restart services, set + openshift_master_rolling_restart_mode=services in host + inventory and relaunch the playbook. when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' - set_fact: current_host: "{{ exists.stat.exists }}" @@ -66,17 +68,25 @@ - name: Determine which masters are currently active hosts: oo_masters_to_config + any_errors_fatal: true tasks: - name: Check master service status command: > systemctl is-active {{ openshift.common.service_type }}-master register: active_check_output - when: openshift.master.cluster_method == 'pacemaker' - failed_when: active_check_output.stdout not in ['active', 'inactive'] + when: openshift.master.cluster_method | default(None) == 'pacemaker' + failed_when: false changed_when: false + # Any master which did not report 'active' or 'inactive' is likely + # unhealthy. Other possible states are 'unknown' or 'failed'. + - fail: + msg: > + Got invalid service state from {{ openshift.common.service_type }}-master + on {{ inventory_hostname }}. Please verify pacemaker cluster. 
+ when: openshift.master.cluster_method | default(None) == 'pacemaker' and active_check_output.stdout not in ['active', 'inactive'] - set_fact: is_active: "{{ active_check_output.stdout == 'active' }}" - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method | default(None) == 'pacemaker' - name: Evaluate master groups hosts: localhost diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 8d0c4945e..1d31657ed 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -154,21 +154,15 @@ validate_checksum: yes with_items: nodes_needing_certs -- name: Configure node instances +- name: Deploy node certificates hosts: oo_nodes_to_config vars: sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - # TODO: Prefix flannel role variables. - etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - pre_tasks: + tasks: - name: Ensure certificate directory exists file: path: "{{ node_cert_dir }}" state: directory - # TODO: notify restart node # possibly test service started time against certificate/config file # timestamps in node to trigger notify @@ -177,8 +171,44 @@ src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz" dest: "{{ node_cert_dir }}" when: certs_missing + +- name: Evaluate node groups + hosts: localhost + become: no + tasks: + - name: Evaluate oo_containerized_master_nodes + add_host: + name: "{{ item }}" + groups: oo_containerized_master_nodes + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_nodes_to_config | default([]) }}" + when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + +- name: Configure node instances + hosts: oo_containerized_master_nodes + serial: 1 + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + roles: + - openshift_node + +- name: Configure node instances + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" roles: - openshift_node + +- name: Additional node config + hosts: oo_nodes_to_config + vars: + # TODO: Prefix flannel role variables. 
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + roles: - role: flannel when: openshift.common.use_flannel | bool - role: nickhammond.logrotate diff --git a/roles/chrony/README.md b/roles/chrony/README.md new file mode 100644 index 000000000..bf15d9669 --- /dev/null +++ b/roles/chrony/README.md @@ -0,0 +1,31 @@ +Role Name +========= + +A role to configure chrony as the ntp client + +Requirements +------------ + + +Role Variables +-------------- + +chrony_ntp_servers: a list of ntp servers to use the chrony.conf file + +Dependencies +------------ + +roles/lib_timedatectl + +Example Playbook +---------------- + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift Operations diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml new file mode 100644 index 000000000..95576e666 --- /dev/null +++ b/roles/chrony/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for chrony diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml new file mode 100644 index 000000000..1973c79e2 --- /dev/null +++ b/roles/chrony/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart chronyd + service: + name: chronyd + state: restarted diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml new file mode 100644 index 000000000..85595d7c3 --- /dev/null +++ b/roles/chrony/meta/main.yml @@ -0,0 +1,18 @@ +--- +galaxy_info: + author: Openshift Operations + description: Configure chrony as an ntp server + company: Red Hat + license: Apache 2.0 + min_ansible_version: 1.9.2 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - all + categories: + - system +dependencies: +- roles/lib_timedatectl diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml new file mode 100644 index 000000000..fae6d8e4c --- /dev/null +++ b/roles/chrony/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: remove ntp package + yum: + name: ntp + state: absent + +- name: ensure chrony package is installed + yum: + name: chrony + state: installed + +- name: Install /etc/chrony.conf + template: + src: chrony.conf.j2 + dest: /etc/chrony.conf + owner: root + group: root + mode: 0644 + notify: + - Restart chronyd + +- name: enabled timedatectl set-ntp yes + timedatectl: + ntp: True + +- name: + service: + name: chronyd + state: started + enabled: yes diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2 new file mode 100644 index 000000000..de43b6364 --- /dev/null +++ b/roles/chrony/templates/chrony.conf.j2 @@ -0,0 +1,45 @@ +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for server in chrony_ntp_servers %} +server {{ server }} iburst +{% endfor %} + +# Ignore stratum in source selection. +stratumweight 0 + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Enable kernel RTC synchronization. +rtcsync + +# In first three updates step the system clock instead of slew +# if the adjustment is larger than 10 seconds. +makestep 10 3 + +# Allow NTP client access from local network. +#allow 192.168/16 + +# Listen for commands only on localhost. +bindcmdaddress 127.0.0.1 +bindcmdaddress ::1 + +# Serve time even if not synchronized to any NTP server. +#local stratum 10 + +keyfile /etc/chrony.keys + +# Specify the key used as password for chronyc. 
+commandkey 1 + +# Generate command key if missing. +generatecommandkey + +# Disable logging of client accesses. +noclientlog + +# Send a message to syslog if a clock adjustment is larger than 0.5 seconds. +logchange 0.5 + +logdir /var/log/chrony +#log measurements statistics tracking diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml new file mode 100644 index 000000000..061a21547 --- /dev/null +++ b/roles/chrony/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for chrony diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml index ee60adcab..5b8691b26 100644 --- a/roles/haproxy/handlers/main.yml +++ b/roles/haproxy/handlers/main.yml @@ -3,3 +3,4 @@ service: name: haproxy state: restarted + when: not (haproxy_start_result_changed | default(false) | bool) diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml index 97f870829..0b8370ce2 100644 --- a/roles/haproxy/tasks/main.yml +++ b/roles/haproxy/tasks/main.yml @@ -19,6 +19,5 @@ enabled: yes register: start_result -- name: Pause 30 seconds if haproxy was just started - pause: seconds=30 - when: start_result | changed +- set_fact: + haproxy_start_result_changed: "{{ start_result | changed }}" diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py new file mode 100644 index 000000000..b6eab5918 --- /dev/null +++ b/roles/lib_timedatectl/library/timedatectl.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +''' + timedatectl ansible module + + This module supports setting ntp enabled +''' +import subprocess + + + + +def do_timedatectl(options=None): + ''' subprocess timedatectl ''' + + cmd = ['/usr/bin/timedatectl'] + if options: + cmd += options.split() + + proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE) + proc.wait() + return proc.stdout.read() + +def main(): + ''' Ansible module for timedatectl + ''' + + module = AnsibleModule( + argument_spec=dict( + #state=dict(default='enabled', type='str'), + ntp=dict(default=True, type='bool'), + ), + #supports_check_mode=True + ) + + # do something + ntp_enabled = False + + results = do_timedatectl() + + for line in results.split('\n'): + if 'NTP enabled' in line: + if 'yes' in line: + ntp_enabled = True + + ######## + # Enable NTP + ######## + if module.params['ntp']: + if ntp_enabled: + module.exit_json(changed=False, results="enabled", state="enabled") + + # Enable it + # Commands to enable ntp + else: + results = do_timedatectl('set-ntp yes') + module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results) + + ######### + # Disable NTP + ######### + else: + if not ntp_enabled: + module.exit_json(changed=False, results="disabled", state="disabled") + + results = do_timedatectl('set-ntp no') + module.exit_json(changed=True, results="disabled", state="disabled") + + module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown") + +# Pylint is getting in the way of basic Ansible +# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py index c08bef4f7..2f9524556 100644 --- a/roles/lib_zabbix/library/zbx_action.py +++ b/roles/lib_zabbix/library/zbx_action.py @@ -81,6 +81,61 @@ def filter_differences(zabbix_filters, user_filters): return rval +def opconditions_diff(zab_val, user_val): + ''' Report whether there are differences between opconditions on + zabbix and opconditions 
supplied by user ''' + + if len(zab_val) != len(user_val): + return True + + for z_cond, u_cond in zip(zab_val, user_val): + if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \ + ['conditiontype', 'operator', 'value']]): + return True + + return False + +def opmessage_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage on + zabbix and opmessage supplied by user ''' + + for op_msg_key, op_msg_val in user_val.items(): + if zab_val[op_msg_key] != str(op_msg_val): + return True + + return False + +def opmessage_grp_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage_grp + on zabbix and opmessage_grp supplied by user ''' + + zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val]) + usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val]) + if usr_grp_ids != zab_grp_ids: + return True + + return False + +def opmessage_usr_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage_usr + on zabbix and opmessage_usr supplied by user ''' + + zab_usr_ids = set([usr['usrid'] for usr in zab_val]) + usr_ids = set([usr['usrid'] for usr in user_val]) + if usr_ids != zab_usr_ids: + return True + + return False + +def opcommand_diff(zab_op_cmd, usr_op_cmd): + ''' Check whether user-provided opcommand matches what's already + stored in Zabbix ''' + + for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items(): + if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val): + return True + return False + def host_in_zabbix(zab_hosts, usr_host): ''' Check whether a particular user host is already in the Zabbix list of hosts ''' @@ -106,23 +161,11 @@ def hostlist_in_zabbix(zab_hosts, usr_hosts): return True -def opcommand_diff(zab_op_cmd, usr_op_cmd): - ''' Check whether user-provided opcommand matches what's already - stored in Zabbix ''' - - for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items(): - if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val): - return True - return False - -# This logic is quite complex. We are comparing two lists of dictionaries. -# The outer for-loops allow us to descend down into both lists at the same time -# and then walk over the key,val pairs of the incoming user dict's changes -# or updates. The if-statements are looking at different sub-object types and -# comparing them. The other suggestion on how to write this is to write a recursive -# compare function but for the time constraints and for complexity I decided to go -# this route. -# pylint: disable=too-many-branches +# We are comparing two lists of dictionaries (the one stored on zabbix and the +# one the user is providing). For each type of operation, determine whether there +# is a difference between what is stored on zabbix and what the user is providing. +# If there is a difference, we take the user-provided data for what needs to +# be stored/updated into zabbix. 
def operation_differences(zabbix_ops, user_ops): '''Determine the differences from user and zabbix for operations''' @@ -132,49 +175,41 @@ def operation_differences(zabbix_ops, user_ops): rval = {} for zab, user in zip(zabbix_ops, user_ops): - for key, val in user.items(): - if key == 'opconditions': - if len(zab[key]) != len(val): - rval[key] = val - break - for z_cond, u_cond in zip(zab[key], user[key]): - if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \ - ['conditiontype', 'operator', 'value']]): - rval[key] = val - break - elif key == 'opmessage': - # Verify each passed param matches - for op_msg_key, op_msg_val in val.items(): - if zab[key][op_msg_key] != str(op_msg_val): - rval[key] = val - break - - elif key == 'opmessage_grp': - zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]]) - usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val]) - if usr_grp_ids != zab_grp_ids: - rval[key] = val - - elif key == 'opmessage_usr': - zab_usr_ids = set([usr['userid'] for usr in zab[key]]) - usr_ids = set([usr['userid'] for usr in val]) - if usr_ids != zab_usr_ids: - rval[key] = val - - elif key == 'opcommand': - if opcommand_diff(zab[key], val): - rval[key] = val - break + for oper in user.keys(): + if oper == 'opconditions' and opconditions_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage' and opmessage_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opcommand' and opcommand_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] # opcommand_grp can be treated just like opcommand_hst # as opcommand_grp[] is just a list of groups - elif key == 'opcommand_hst' or key == 'opcommand_grp': - if not hostlist_in_zabbix(zab[key], val): - rval[key] = val - break + elif oper == 'opcommand_hst' or oper == 'opcommand_grp': + if not hostlist_in_zabbix(zab[oper], user[oper]): + rval[oper] = user[oper] + + # if it's any other type of operation than the ones tested above + # just do a direct compare + elif oper not in ['opconditions', 'opmessage', 'opmessage_grp', + 'opmessage_usr', 'opcommand', 'opcommand_hst', + 'opcommand_grp'] \ + and str(zab[oper]) != str(user[oper]): + rval[oper] = user[oper] - elif zab[key] != str(val): - rval[key] = val return rval def get_users(zapi, users): diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml index 47749389e..61344357a 100644 --- a/roles/lib_zabbix/tasks/create_template.yml +++ b/roles/lib_zabbix/tasks/create_template.yml @@ -57,6 +57,7 @@ expression: "{{ item.expression }}" priority: "{{ item.priority }}" url: "{{ item.url | default(None, True) }}" + status: "{{ item.status | default('', True) }}" with_items: template.ztriggers when: template.ztriggers is defined diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index 1edf21d9b..14a613786 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -1,4 +1,8 @@ --- +- name: Install NFS storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" + when: not openshift.common.is_atomic | bool + - name: Set seboolean to allow nfs storage plugin access from containers seboolean: name: virt_use_nfs diff --git 
a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml index a05e552e3..dd13e76f7 100644 --- a/roles/os_zabbix/vars/template_docker.yml +++ b/roles/os_zabbix/vars/template_docker.yml @@ -72,10 +72,12 @@ g_template_docker: url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc' priority: high + # Re-enable for OpenShift 3.1.1 (https://bugzilla.redhat.com/show_bug.cgi?id=1292971#c6) - name: 'docker.container.dns.resolution failed on {HOST.NAME}' expression: '{Template Docker:docker.container.dns.resolution.min(#3)}>0' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc' priority: average + status: disabled - name: 'docker.container.existing.dns.resolution.failed on {HOST.NAME}' expression: '{Template Docker:docker.container.existing.dns.resolution.failed.min(#3)}>0' diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md new file mode 100644 index 000000000..4215f9eeb --- /dev/null +++ b/roles/oso_monitoring_tools/README.md @@ -0,0 +1,54 @@ +Role Name +========= + +This role will install the Openshift Monitoring Utilities + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +osomt_zagg_client_config + +from vars/main.yml: + +osomt_zagg_client_config: + host: + name: "{{ osomt_host_name }}" + zagg: + url: "{{ osomt_zagg_url }}" + user: "{{ osomt_zagg_user }}" + pass: "{{ osomt_zagg_password }}" + ssl_verify: "{{ osomt_zagg_ssl_verify }}" + verbose: "{{ osomt_zagg_verbose }}" + debug: "{{ osomt_zagg_debug }}" + +Dependencies +------------ + +None + +Example Playbook +---------------- + +- role: "oso_monitoring_tools" + osomt_host_name: hostname + osomt_zagg_url: http://path.to/zagg_web + osomt_zagg_user: admin + osomt_zagg_password: password + osomt_zagg_ssl_verify: True + osomt_zagg_verbose: False + osomt_zagg_debug: False + +License +------- + +BSD + +Author Information +------------------ + +Openshift Operations diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml new file mode 100644 index 000000000..a17424f25 --- /dev/null +++ b/roles/oso_monitoring_tools/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for oso_monitoring_tools diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml new file mode 100644 index 000000000..cefa780ab --- /dev/null +++ b/roles/oso_monitoring_tools/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for oso_monitoring_tools diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml new file mode 100644 index 000000000..9c42b68dc --- /dev/null +++ b/roles/oso_monitoring_tools/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: OpenShift Operations + description: Install Openshift Monitoring tools + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: [] diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml new file mode 100644 index 000000000..c90fc56e2 --- /dev/null +++ b/roles/oso_monitoring_tools/tasks/main.yml @@ -0,0 +1,18 @@ +--- +# tasks file for oso_monitoring_tools +- name: Install the Openshift Tools RPMS + yum: + name: "{{ item }}" + state: 
latest + with_items: + - openshift-tools-scripts-monitoring-zagg-client + - python-openshift-tools-monitoring-zagg + - python-openshift-tools-monitoring-zabbix + +- debug: var=g_zagg_client_config + +- name: Generate the /etc/openshift_tools/zagg_client.yaml config file + copy: + content: "{{ osomt_zagg_client_config | to_nice_yaml }}" + dest: /etc/openshift_tools/zagg_client.yaml + mode: "644" diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml new file mode 100644 index 000000000..3538ba30b --- /dev/null +++ b/roles/oso_monitoring_tools/vars/main.yml @@ -0,0 +1,12 @@ +--- +# vars file for oso_monitoring_tools +osomt_zagg_client_config: + host: + name: "{{ osomt_host_name }}" + zagg: + url: "{{ osomt_zagg_url }}" + user: "{{ osomt_zagg_user }}" + pass: "{{ osomt_zagg_password }}" + ssl_verify: "{{ osomt_zagg_ssl_verify }}" + verbose: "{{ osomt_zagg_verbose }}" + debug: "{{ osomt_zagg_debug }}" diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 4e30929da..1aacf3a4b 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -127,14 +127,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen masters_set = True host_props['node'] = True - #TODO: Reenable this option once container installs are out of tech preview - rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', - type=click.Choice(['rpm', 'container']), - default='rpm') - if rpm_or_container == 'container': - host_props['containerized'] = True - else: - host_props['containerized'] = False + host_props['containerized'] = False + if oo_cfg.settings['variant_version'] != '3.0': + rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', + type=click.Choice(['rpm', 'container']), + default='rpm') + if rpm_or_container == 'container': + host_props['containerized'] = True if existing_env: host_props['new_host'] = True diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 20401f812..c0d115fdc 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -128,6 +128,8 @@ def write_host(host, inventory, schedulable=None): facts += ' openshift_hostname={}'.format(host.hostname) if host.public_hostname: facts += ' openshift_public_hostname={}'.format(host.public_hostname) + if host.containerized: + facts += ' containerized={}'.format(host.containerized) # TODO: For not write_host is handles both master and nodes. # Technically only nodes will ever need this. |
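The chrony role introduced above leaves the "Example Playbook" section of its README empty. A minimal sketch of applying it — the host group and NTP server addresses are placeholders chosen for illustration; only the role name, the chrony_ntp_servers variable, and the lib_timedatectl dependency (pulled in through the role's meta) come from the diff itself:

```yaml
---
# Hypothetical playbook; the group name and server list are placeholders.
- hosts: OSEv3
  roles:
  - role: chrony
    chrony_ntp_servers:
    - 0.rhel.pool.ntp.org
    - 1.rhel.pool.ntp.org
```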
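The new lib_timedatectl library module exposes a single ntp boolean. A sketch of calling it directly from a task, mirroring how the chrony role above uses it (the task name is illustrative):

```yaml
- name: Enable NTP synchronization via systemd timedated
  timedatectl:
    ntp: yes
```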
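The reworded master restart prompt points operators at openshift_master_rolling_restart_mode=services for a services-only restart. One way to set that ahead of time, assuming the variable is applied to the masters through group_vars — the file location is an assumption; any inventory mechanism that sets the variable works:

```yaml
# group_vars/OSEv3.yml (hypothetical path)
openshift_master_rolling_restart_mode: services   # 'system' restarts the hosts instead
```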
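The oso_monitoring_tools role renders osomt_zagg_client_config through to_nice_yaml into /etc/openshift_tools/zagg_client.yaml. Using the placeholder values from the role's README, the generated file would come out roughly as follows (every value shown is illustrative):

```yaml
host:
  name: hostname
zagg:
  debug: false
  pass: password
  ssl_verify: true
  url: http://path.to/zagg_web
  user: admin
  verbose: false
```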
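Pulling the style-guide naming rules anchored above into one place — the cli_environment name is invented for illustration, while g_environment, someplays.yml and osomt_host_name appear elsewhere in this diff:

```yaml
# passed in from the CLI:  ansible-playbook -e cli_environment=prod someplays.yml
g_environment: prod          # global variable (outside any role) -> g_ prefix
osomt_host_name: some-host   # role variable -> role prefix of at least 3 characters
```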