-rw-r--r--  .gitignore | 4
-rw-r--r--  .travis.yml | 4
-rw-r--r--  callback_plugins/openshift_quick_installer.py | 28
-rw-r--r--  filter_plugins/oo_filters.py | 29
-rw-r--r--  filter_plugins/oo_zabbix_filters.py | 160
-rw-r--r--  filter_plugins/openshift_master.py | 22
-rw-r--r--  git/.pylintrc | 6
-rwxr-xr-x  library/modify_yaml.py | 28
-rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py (renamed from playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py) | 11
-rw-r--r--  playbooks/adhoc/noc/create_host.yml | 58
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml | 37
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml | 43
-rw-r--r--  playbooks/adhoc/uninstall.yml | 8
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 60
l---------  playbooks/adhoc/zabbix_setup/filter_plugins | 1
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml | 7
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 19
l---------  playbooks/adhoc/zabbix_setup/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 2
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 6
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 12
-rw-r--r--  roles/kube_nfs_volumes/library/partitionpool.py | 8
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 20
-rw-r--r--  roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py | 4
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 57
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml | 479
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 35
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 6
-rw-r--r--  roles/openshift_facts/vars/main.yml | 7
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 10
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml | 3
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 4
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py | 4
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 6
-rw-r--r--  roles/openshift_node/README.md | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 9
-rw-r--r--  roles/openshift_storage_nfs_lvm/defaults/main.yml | 7
-rw-r--r--  roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 | 2
-rw-r--r--  utils/.coveragerc | 1
-rw-r--r--  utils/Makefile | 18
-rw-r--r--  utils/setup.cfg | 4
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 2
-rw-r--r--  utils/src/ooinstall/oo_config.py | 27
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 22
-rw-r--r--  utils/src/ooinstall/variants.py | 44
-rw-r--r--  utils/test-requirements.txt | 2
-rw-r--r--  utils/test/cli_installer_tests.py | 21
-rw-r--r--  utils/test/fixture.py | 10
-rw-r--r--  utils/test/oo_config_tests.py | 7
-rw-r--r--  utils/test/test_utils.py | 12
-rw-r--r--  utils/tox.ini | 17
64 files changed, 869 insertions, 640 deletions
diff --git a/.gitignore b/.gitignore
index 48507c5d1..ac249d5eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,7 @@ multi_inventory.yaml
ansible.cfg
*.retry
.vscode/*
+.cache
+.tox
+.coverage
+*.egg-info
diff --git a/.travis.yml b/.travis.yml
index b5b7a2a59..0e3a75df7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,13 @@
---
sudo: false
+cache:
+ - pip
+
language: python
python:
- "2.7"
+ - "3.5"
install:
- pip install -r requirements.txt
diff --git a/callback_plugins/openshift_quick_installer.py b/callback_plugins/openshift_quick_installer.py
index fc9bfb899..b4c7edd38 100644
--- a/callback_plugins/openshift_quick_installer.py
+++ b/callback_plugins/openshift_quick_installer.py
@@ -36,30 +36,13 @@ What's different:
"""
from __future__ import (absolute_import, print_function)
-import imp
-import os
import sys
from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
-ANSIBLE_PATH = imp.find_module('ansible')[1]
-DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
-DEFAULT_MODULE = imp.load_source(
- 'ansible.plugins.callback.default',
- DEFAULT_PATH
-)
-try:
- from ansible.plugins.callback import CallbackBase
- BASECLASS = CallbackBase
-except ImportError: # < ansible 2.1
- BASECLASS = DEFAULT_MODULE.CallbackModule
-
-reload(sys)
-sys.setdefaultencoding('utf-8')
-
-
-class CallbackModule(DEFAULT_MODULE.CallbackModule):
+class CallbackModule(CallbackBase):
"""
Ansible callback plugin
@@ -286,8 +269,9 @@ The only thing we change here is adding `log_only=True` to the
self._display.display("", screen_only=True)
# Some plays are conditional and won't run (such as load
- # balancers) if they aren't required. Let the user know about
- # this to avoid potential confusion.
+ # balancers) if they aren't required. Sometimes plays are
+ # conditionally included later in the run. Let the user know
+ # about this to avoid potential confusion.
if self.plays_total_ran != self.plays_count:
- print("Installation Complete: Note: Play count is an estimate and some were skipped because your install does not require them")
+ print("Installation Complete: Note: Play count is only an estimate, some plays may have been skipped or dynamically added")
self._display.display("", screen_only=True)
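
For context on the hunk above: with the imp-based loading of Ansible's bundled "default" callback removed, the plugin simply subclasses CallbackBase. A minimal sketch of that pattern, assuming Ansible 2.x; the plugin name and message below are illustrative and not part of this repository:

from __future__ import (absolute_import, print_function)

from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """Minimal callback: announce each play as it starts."""
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'play_announcer'   # illustrative name

    def v2_playbook_on_play_start(self, play):
        # self._display is provided by CallbackBase
        self._display.display("Starting play: %s" % play.get_name().strip())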
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8fe85d8e2..bad1f6a3b 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -19,6 +19,7 @@ from distutils.version import LooseVersion
from operator import itemgetter
from ansible.parsing.yaml.dumper import AnsibleDumper
from urlparse import urlparse
+from six import string_types
HAS_OPENSSL = False
try:
@@ -120,7 +121,7 @@ class FilterModule(object):
raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
if not isinstance(variables, dict):
raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
- if not isinstance(inventory_hostname, basestring):
+ if not isinstance(inventory_hostname, string_types):
raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
# pylint: disable=no-member
ansible_version = pkg_resources.get_distribution("ansible").version
@@ -215,7 +216,7 @@ class FilterModule(object):
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not all(isinstance(x, basestring) for x in data):
+ if not all(isinstance(x, string_types) for x in data):
raise errors.AnsibleFilterError("|failed expects first param is a list"
" of strings")
retval = [prepend + s for s in data]
@@ -362,7 +363,7 @@ class FilterModule(object):
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
- if not isinstance(filter_attr, basestring):
+ if not isinstance(filter_attr, string_types):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
# Gather up the values for the list of keys passed in
@@ -401,9 +402,9 @@ class FilterModule(object):
"""
if not isinstance(nodes, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(label, basestring):
+ if not isinstance(label, string_types):
raise errors.AnsibleFilterError("failed expects label to be a string")
- if value is not None and not isinstance(value, basestring):
+ if value is not None and not isinstance(value, string_types):
raise errors.AnsibleFilterError("failed expects value to be a string")
def label_filter(node):
@@ -419,7 +420,7 @@ class FilterModule(object):
else:
return False
- if isinstance(labels, basestring):
+ if isinstance(labels, string_types):
labels = yaml.safe_load(labels)
if not isinstance(labels, dict):
raise errors.AnsibleFilterError(
@@ -518,7 +519,7 @@ class FilterModule(object):
"cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
"names": [ "some-hostname.com" ] }]
"""
- if not isinstance(named_certs_dir, basestring):
+ if not isinstance(named_certs_dir, string_types):
raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
if not isinstance(internal_hostnames, list):
@@ -545,7 +546,7 @@ class FilterModule(object):
if cert.get_extension(i).get_short_name() == 'subjectAltName':
for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
certificate['names'].append(name)
- except:
+ except Exception:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
"please specify certificate names in host inventory"))
@@ -670,7 +671,7 @@ class FilterModule(object):
migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
'openshift_registry_selector': 'openshift_hosted_registry_selector'}
- for old_fact, new_fact in migrations.iteritems():
+ for old_fact, new_fact in migrations.items():
if old_fact in facts and new_fact not in facts:
facts[new_fact] = facts[old_fact]
return facts
@@ -781,7 +782,7 @@ class FilterModule(object):
"""
if not isinstance(rpms, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
- if openshift_version is not None and not isinstance(openshift_version, basestring):
+ if openshift_version is not None and not isinstance(openshift_version, string_types):
raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
rpms_31 = []
@@ -800,9 +801,9 @@ class FilterModule(object):
"""
if not isinstance(pods, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(deployment_type, basestring):
+ if not isinstance(deployment_type, string_types):
raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
- if not isinstance(component, basestring):
+ if not isinstance(component, string_types):
raise errors.AnsibleFilterError("failed expects component to be a string")
image_prefix = 'openshift/origin-'
@@ -843,7 +844,7 @@ class FilterModule(object):
Ex. v3.2.0.10 -> -3.2.0.10
v1.2.0-rc1 -> -1.2.0
"""
- if not isinstance(version, basestring):
+ if not isinstance(version, string_types):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
if version.startswith("v"):
version = version[1:]
@@ -861,7 +862,7 @@ class FilterModule(object):
Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
"""
- if not isinstance(url, basestring):
+ if not isinstance(url, string_types):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
parse_result = urlparse(url)
if parse_result.netloc != '':
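
The string-type changes above all follow the same Python 2/3 pattern: six.string_types is (str, unicode) on Python 2 and (str,) on Python 3, so one isinstance check covers both interpreters. A small standalone sketch; the names are illustrative, not taken from oo_filters.py:

from six import string_types

def require_string(value, name):
    # Fail fast with a clear message instead of deep inside a filter.
    if not isinstance(value, string_types):
        raise TypeError("%s must be a string, got %r" % (name, type(value)))
    return value

require_string(u"region=infra", "label")   # accepted on Python 2 and 3
# require_string(42, "label")              # would raise TypeError on both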
diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py
deleted file mode 100644
index 1c1854b29..000000000
--- a/filter_plugins/oo_zabbix_filters.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
-Custom zabbix filters for use in openshift-ansible
-'''
-
-import pdb
-
-
-class FilterModule(object):
- ''' Custom zabbix ansible filters '''
-
- @staticmethod
- def create_data(data, results, key, new_key):
- '''Take a dict, filter through results and add results['key'] to dict
- '''
- new_list = [app[key] for app in results]
- data[new_key] = new_list
- return data
-
- @staticmethod
- def oo_set_zbx_trigger_triggerid(item, trigger_results):
- '''Set zabbix trigger id from trigger results
- '''
- if isinstance(trigger_results, list):
- item['triggerid'] = trigger_results[0]['triggerid']
- return item
-
- item['triggerid'] = trigger_results['triggerids'][0]
- return item
-
- @staticmethod
- def oo_set_zbx_item_hostid(item, template_results):
- ''' Set zabbix host id from template results
- '''
- if isinstance(template_results, list):
- item['hostid'] = template_results[0]['templateid']
- return item
-
- item['hostid'] = template_results['templateids'][0]
- return item
-
- @staticmethod
- def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
- @staticmethod
- def select_by_name(ans_data, data):
- ''' test
- '''
- for zabbix_item in data:
- if ans_data['name'] == zabbix_item:
- data[zabbix_item]['params']['hostid'] = ans_data['templateid']
- return data[zabbix_item]['params']
- return None
-
- @staticmethod
- def oo_build_zabbix_collect(data, string, value):
- ''' Build a list of dicts from a list of data matched on string attribute
- '''
- rval = []
- for item in data:
- if item[string] == value:
- rval.append(item)
-
- return rval
-
- @staticmethod
- def oo_build_zabbix_list_dict(values, string):
- ''' Build a list of dicts with string as key for each value
- '''
- rval = []
- for value in values:
- rval.append({string: value})
- return rval
-
- @staticmethod
- def oo_remove_attr_from_list_dict(data, attr):
- ''' Remove a specific attribute from a dict
- '''
- attrs = []
- if isinstance(attr, str):
- attrs.append(attr)
- else:
- attrs = attr
-
- for attribute in attrs:
- for _entry in data:
- _entry.pop(attribute, None)
-
- return data
-
- @staticmethod
- def itservice_results_builder(data, clusters, keys):
- '''Take a list of dict results,
- loop through each results and create a hash
- of:
- [{clusterid: cluster1, key: 111 }]
- '''
- r_list = []
- for cluster in clusters:
- for results in data:
- if cluster == results['item'][0]:
- results = results['results']
- if results and len(results) > 0 and all([_key in results[0] for _key in keys]):
- tmp = {}
- tmp['clusterid'] = cluster
- for key in keys:
- tmp[key] = results[0][key]
- r_list.append(tmp)
-
- return r_list
-
- @staticmethod
- def itservice_dependency_builder(data, cluster):
- '''Take a list of dict results,
- loop through each results and create a hash
- of:
- [{clusterid: cluster1, key: 111 }]
- '''
- r_list = []
- for dep in data:
- if cluster == dep['clusterid']:
- r_list.append({'name': '%s - %s' % (dep['clusterid'], dep['description']), 'dep_type': 'hard'})
-
- return r_list
-
- @staticmethod
- def itservice_dep_builder_list(data):
- '''Take a list of dict results,
- loop through each results and create a hash
- of:
- [{clusterid: cluster1, key: 111 }]
- '''
- r_list = []
- for dep in data:
- r_list.append({'name': '%s' % dep, 'dep_type': 'hard'})
-
- return r_list
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {
- "select_by_name": self.select_by_name,
- "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid,
- "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid,
- "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
- "create_data": self.create_data,
- "oo_build_zabbix_collect": self.oo_build_zabbix_collect,
- "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict,
- "itservice_results_builder": self.itservice_results_builder,
- "itservice_dependency_builder": self.itservice_dependency_builder,
- "itservice_dep_builder_list": self.itservice_dep_builder_list,
- }
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 57b1f7d82..ec09b09f6 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -6,22 +6,14 @@ Custom filters for use in openshift-master
'''
import copy
import sys
-import yaml
+
+from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
from ansible import errors
+from ansible.plugins.filter.core import to_bool as ansible_bool
+from six import string_types
-# pylint: disable=no-name-in-module,import-error,wrong-import-order
-from distutils.version import LooseVersion
-try:
- # ansible-2.1
- from ansible.plugins.filter.core import to_bool as ansible_bool
-except ImportError:
- try:
- # ansible-2.0.x
- from ansible.runner.filter_plugins.core import bool as ansible_bool
- except ImportError:
- # ansible-1.9.x
- from ansible.plugins.filter.core import bool as ansible_bool
+import yaml
class IdentityProviderBase(object):
@@ -513,7 +505,7 @@ class FilterModule(object):
'master3.example.com']
returns True
'''
- if not issubclass(type(data), basestring):
+ if not issubclass(type(data), string_types):
raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
if not issubclass(type(masters), list):
raise errors.AnsibleFilterError("|failed expects masters is a list")
@@ -558,7 +550,7 @@ class FilterModule(object):
def oo_htpasswd_users_from_file(file_contents):
''' return a dictionary of htpasswd users from htpasswd file contents '''
htpasswd_entries = {}
- if not isinstance(file_contents, basestring):
+ if not isinstance(file_contents, string_types):
raise errors.AnsibleFilterError("failed, expects to filter on a string")
for line in file_contents.splitlines():
user = None
diff --git a/git/.pylintrc b/git/.pylintrc
index 9c98889b3..411330fe7 100644
--- a/git/.pylintrc
+++ b/git/.pylintrc
@@ -8,7 +8,7 @@
#init-hook=
# Profiled execution.
-profile=no
+#profile=no
# Add files or directories to the blacklist. They should be base names, not
# paths.
@@ -98,7 +98,7 @@ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / stateme
# Add a comment according to your evaluation note. This is used by the global
# evaluation report (RP0004).
-comment=no
+#comment=no
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
@@ -249,7 +249,7 @@ ignored-classes=SQLObject
# When zope mode is activated, add a predefined set of Zope acquired attributes
# to generated-members.
-zope=no
+#zope=no
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
diff --git a/library/modify_yaml.py b/library/modify_yaml.py
index 000c7d9a2..d8d22d5ea 100755
--- a/library/modify_yaml.py
+++ b/library/modify_yaml.py
@@ -25,15 +25,22 @@ EXAMPLES = '''
def set_key(yaml_data, yaml_key, yaml_value):
changes = []
ptr = yaml_data
+ final_key = yaml_key.split('.')[-1]
for key in yaml_key.split('.'):
- if key not in ptr and key != yaml_key.split('.')[-1]:
+ # Key isn't present and we're not on the final key. Set to empty dictionary.
+ if key not in ptr and key != final_key:
ptr[key] = {}
ptr = ptr[key]
- elif key == yaml_key.split('.')[-1]:
+ # Current key is the final key. Update value.
+ elif key == final_key:
if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): # noqa: F405
ptr[key] = yaml_value
changes.append((yaml_key, yaml_value))
else:
+ # Next value is None and we're not on the final key.
+ # Turn value into an empty dictionary.
+ if ptr[key] is None and key != final_key:
+ ptr[key] = {}
ptr = ptr[key]
return changes
@@ -71,27 +78,24 @@ def main():
yaml.add_representer(type(None), none_representer)
try:
-
- yaml_file = open(dest)
- yaml_data = yaml.safe_load(yaml_file.read())
- yaml_file.close()
+ with open(dest) as yaml_file:
+ yaml_data = yaml.safe_load(yaml_file.read())
changes = set_key(yaml_data, yaml_key, yaml_value)
if len(changes) > 0:
if backup:
module.backup_local(dest)
- yaml_file = open(dest, 'w')
- yaml_string = yaml.dump(yaml_data, default_flow_style=False)
- yaml_string = yaml_string.replace('\'\'', '""')
- yaml_file.write(yaml_string)
- yaml_file.close()
+ with open(dest, 'w') as yaml_file:
+ yaml_string = yaml.dump(yaml_data, default_flow_style=False)
+ yaml_string = yaml_string.replace('\'\'', '""')
+ yaml_file.write(yaml_string)
return module.exit_json(changed=(len(changes) > 0), changes=changes)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
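
A rough trace of the set_key() traversal patched above, assuming the function is called directly (the module-level AnsibleModule is only consulted when the final key already exists, which this path avoids); the input dict is illustrative:

yaml_data = {'kubernetesMasterConfig': {'admissionConfig': None}}

# Intermediate keys are created (or replaced when None); only the final key
# receives the value, and the change is recorded as (dotted_key, value).
changes = set_key(yaml_data, 'kubernetesMasterConfig.admissionConfig.pluginConfig', {})

# yaml_data == {'kubernetesMasterConfig': {'admissionConfig': {'pluginConfig': {}}}}
# changes   == [('kubernetesMasterConfig.admissionConfig.pluginConfig', {})]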
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
index c19274e06..daff68fbe 100644
--- a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
@@ -5,22 +5,11 @@
Custom filters for use in openshift-ansible
'''
-import pdb
-
class FilterModule(object):
''' Custom ansible filters '''
@staticmethod
- def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
- @staticmethod
def translate_volume_name(volumes, target_volume):
'''
This filter matches a device string /dev/sdX to /dev/xvdX
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
deleted file mode 100644
index 318396bcc..000000000
--- a/playbooks/adhoc/noc/create_host.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Template
- state: list
- params:
- host: ctr_test_kwoodson
- filter:
- host:
- - ctr_kwoodson_test_tmpl
-
- register: tmpl_results
-
- - debug: var=tmpl_results
-
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Host
- state: absent
- params:
- host: ctr_test_kwoodson
- interfaces:
- - type: 1
- main: 1
- useip: 1
- ip: 127.0.0.1
- dns: ""
- port: 10050
- groups:
- - groupid: 1
- templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
- output: extend
- filter:
- host:
- - ctr_test_kwoodson
-
- register: host_results
-
- - debug: var=host_results
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
deleted file mode 100644
index b694aea1b..000000000
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a maintenace object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- vars:
- oo_hostids: ''
- oo_groupids: ''
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Maintenance
- state: present
- params:
- name: "{{ oo_name }}"
- description: "{{ oo_desc }}"
- active_since: "{{ oo_start }}"
- active_till: "{{ oo_stop }}"
- maintenance_type: "0"
- output: extend
- hostids: "{{ oo_hostids.split(',') | default([]) }}"
- #groupids: "{{ oo_groupids.split(',') | default([]) }}"
- timeperiods:
- - start_time: "{{ oo_start }}"
- period: "{{ oo_stop }}"
- selectTimeperiods: extend
-
- register: maintenance
-
- - debug: var=maintenance
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
deleted file mode 100644
index 32fc7ce68..000000000
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: 'Get current hosts who have triggers that are alerting by trigger description'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Trigger
- state: list
- params:
- only_true: true
- output: extend
- selectHosts: extend
- searchWildCardsEnabled: 1
- search:
- description: "{{ oo_desc }}"
- register: problems
-
- - debug: var=problems
-
- - set_fact:
- problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
-
- - debug: var=problem_hosts
-
- - add_host:
- name: "{{ item }}"
- groups: problem_hosts_group
- with_items: "{{ problem_hosts }}"
-
-- name: "Run on problem hosts"
- hosts: problem_hosts_group
- gather_facts: no
- tasks:
- - command: "{{ oo_cmd }}"
- when: oo_cmd is defined
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index bdd92a47d..b9966e715 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -92,6 +92,8 @@
- atomic-enterprise-sdn-ovs
- atomic-openshift
- atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
- atomic-openshift-node
- atomic-openshift-sdn-ovs
- cockpit-bridge
@@ -105,6 +107,8 @@
- openshift-sdn-ovs
- openvswitch
- origin
+ - origin-excluder
+ - origin-docker-excluder
- origin-clients
- origin-node
- origin-sdn-ovs
@@ -254,6 +258,8 @@
- atomic-enterprise-master
- atomic-openshift
- atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
- atomic-openshift-master
- cockpit-bridge
- cockpit-docker
@@ -265,6 +271,8 @@
- openshift-master
- origin
- origin-clients
+ - origin-excluder
+ - origin-docker-excluder
- origin-master
- pacemaker
- pcs
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
deleted file mode 100644
index 955f990b7..000000000
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost:8080/zabbix/api_jsonrpc.php
- g_user: ''
- g_password: ''
-
- roles:
- - lib_zabbix
-
- post_tasks:
- - name: CLEAN List template for heartbeat
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template Heartbeat'
- register: templ_heartbeat
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Server'
- register: templ_zabbix_server
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Agent'
- register: templ_zabbix_agent
-
- - name: CLEAN List all templates
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- register: templates
-
- - debug: var=templ_heartbeat.results
-
- - name: Remove templates if heartbeat template is missing
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- name: "{{ item }}"
- state: absent
- with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
- when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/zabbix_setup/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
deleted file mode 100755
index 0fe65b338..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- include: clean_zabbix.yml
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
deleted file mode 100755
index 0d5e01878..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
- g_zbx_scriptrunner_user: scriptrunner
- g_zbx_scriptrunner_bastion_host: specialhost.example.com
- roles:
- - role: os_zabbix
- ozb_server: "{{ g_server }}"
- ozb_user: "{{ g_user }}"
- ozb_password: "{{ g_password }}"
- ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
- ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/adhoc/zabbix_setup/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 0d451cf77..1e0a6d4e7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -18,20 +18,20 @@
# If a node fails, halt everything, the admin will need to clean up and we
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- - name: Prepare for Node evacuation
+ - name: Prepare for Node draining
command: >
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 496b00697..d6115e7a5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,6 +89,8 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..5fc81bf3a 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -204,7 +204,7 @@
cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
hosts: oo_nodes_to_config
serial: 1
any_errors_fatal: true
@@ -222,7 +222,7 @@
was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
when: openshift_certificates_redeploy_ca | default(false) | bool
- - name: Prepare for node evacuation
+ - name: Prepare for node draining
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
@@ -230,11 +230,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
- - name: Evacuate node
+ - name: Drain node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --evacuate --force
+ --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
index d800b289b..1b418920f 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -19,11 +19,9 @@
when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 0a972adf6..be42f005f 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -42,15 +42,28 @@
{{ avail_disk.stdout }} Kb available.
when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
- # TODO - Refactor containerized backup to use etcd_container to backup the data so we don't rely on
- # the host's etcdctl binary which may be of a different version.
-
- # for non containerized and non embedded we should have the correct version of etcd installed already
- # For embedded we need to use the latest because OCP 3.3 uses a version of etcd that can only be backed
- # up with etcd-3.x
+ # For non containerized and non embedded we should have the correct version of
+ # etcd installed already. So don't do anything.
+ #
+ # For embedded or containerized we need to use the latest because OCP 3.3 uses
+ # a version of etcd that can only be backed up with etcd-3.x and if it's
+ # containerized then etcd version may be newer than that on the host so
+ # upgrade it.
+ #
+ # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
+ # if you use package there, like this: "Could not find a module for unknown."
+ # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ #
+ # TODO - We should refactor all containerized backups to use the containerized
+ # version of etcd to perform the backup rather than relying on the host's
+ # binaries. Until we do that we'll continue to have problems backing up etcd
+ # when atomic host has an older version than the version that's running in the
+ # container whether that's embedded or not
- name: Install latest etcd for containerized or embedded
- package: name=etcd state=latest
- when: ( openshift.common.is_containerized and not openshift.common.is_atomic ) or embedded_etcd | bool
+ package:
+ name: etcd
+ state: latest
+ when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
- name: Generate etcd backup
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
index f88981a0b..5f8b59e17 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -8,8 +8,7 @@
- name: Set new_etcd_image
set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
- else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
- name: Pull new etcd image
command: "docker pull {{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 5ff9521ec..0f8d94737 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -75,7 +75,7 @@
hosts: etcd_hosts_to_upgrade
serial: 1
vars:
- upgrade_version: 3.0.14
+ upgrade_version: 3.0.15
tasks:
- include: containerized_tasks.yml
when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 1238acb05..673f11889 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -146,7 +146,7 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 474e6311e..6950b6166 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -51,6 +51,14 @@
- include: create_service_signer_cert.yml
+# Set openshift_master_facts separately. In order to reconcile
+# admission_config's, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
handlers:
@@ -58,8 +66,9 @@
static: yes
roles:
- openshift_facts
- - openshift_master_facts
- tasks:
+ post_tasks:
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
- include: upgrade_scheduler.yml
- include: "{{ master_config_hook }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index cefc7d12b..68b111df4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -39,9 +39,9 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 25fa10450..b40c32669 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -10,13 +10,11 @@
state: restarted
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.common.hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Restart master controllers
service:
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
index 2cd454274..1857433c7 100644
--- a/roles/kube_nfs_volumes/library/partitionpool.py
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -3,6 +3,8 @@
Ansible module for partitioning.
"""
+from __future__ import print_function
+
# There is no pyparted on our Jenkins worker
# pylint: disable=import-error
import parted
@@ -131,7 +133,7 @@ def partition(diskname, specs, force=False, check_mode=False):
disk = None
if disk and len(disk.partitions) > 0 and not force:
- print "skipping", diskname
+ print("skipping", diskname)
return 0
# create new partition table, wiping all existing data
@@ -220,7 +222,7 @@ def main():
try:
specs = parse_spec(sizes)
- except ValueError, ex:
+ except ValueError as ex:
err = "Error parsing sizes=" + sizes + ": " + str(ex)
module.fail_json(msg=err)
@@ -229,7 +231,7 @@ def main():
for disk in disks.split(","):
try:
changed_count += partition(disk, specs, force, module.check_mode)
- except Exception, ex:
+ except Exception as ex:
err = "Error creating partitions on " + disk + ": " + str(ex)
raise
# module.fail_json(msg=err)
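
The partitionpool.py hunks above apply two routine Python 3 fixes: print as a function (enabled on Python 2 via __future__) and the "except ... as" spelling, which is the only form Python 3 accepts. A tiny standalone sketch; the function and values are illustrative:

from __future__ import print_function

def parse_size(text):
    try:
        return int(text)
    except ValueError as ex:   # "except ValueError, ex" is Python-2-only syntax
        print("could not parse size", repr(text), "-", ex)
        return None

parse_size("10")     # -> 10
parse_size("10GiB")  # prints a message, returns None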
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index d44438332..a88470bdd 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -9,7 +9,7 @@ include:
* Master/Node Service Certificates
* Router/Registry Service Certificates from etcd secrets
* Master/Node/Router/Registry/Admin `kubeconfig`s
-* Etcd certificates
+* Etcd certificates (including embedded)
This role pairs well with the redeploy certificates playbook:
@@ -111,12 +111,16 @@ There are two top-level keys in the saved JSON results, `data` and
`summary`.
The `data` key is a hash where the keys are the names of each host
-examined and the values are the check results for each respective
-host.
+examined and the values are the check results for the certificates
+identified on each respective host.
-The `summary` key is a hash that summarizes the number of certificates
-expiring within the configured warning window and the number of
-already expired certificates.
+The `summary` key is a hash that summarizes the total number of
+certificates:
+
+* examined on the entire cluster
+* OK
+* expiring within the configured warning window
+* already expired
The example below is abbreviated to save space:
@@ -193,7 +197,9 @@ The example below is abbreviated to save space:
},
"summary": {
"warning": 6,
- "expired": 0
+ "expired": 0,
+ "total": 7,
+ "ok": 1
}
}
```
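
A short sketch of consuming the saved JSON report the README describes; the file path below is hypothetical and depends on where the role was told to save its results:

import json

with open('/tmp/cert-expiry-report.json') as report:   # hypothetical path
    results = json.load(report)

print(sorted(results['data']))   # host names that were examined
print(results['summary'])        # e.g. {'warning': 6, 'expired': 0, 'total': 7, 'ok': 1}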
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index bedd23fe8..5f102e960 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -51,9 +51,13 @@ Example playbook usage:
total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts])
total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts])
+ total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts])
+ total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts])
json_result['summary']['warning'] = total_warnings
json_result['summary']['expired'] = total_expired
+ json_result['summary']['ok'] = total_ok
+ json_result['summary']['total'] = total_total
return json_result
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index e838eb2d4..7161b5277 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -371,7 +371,7 @@ an OpenShift Container Platform cluster
######################################################################
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
- for _, v in cert_meta.iteritems():
+ for _, v in cert_meta.items():
with open(v, 'r') as fp:
cert = fp.read()
cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
@@ -467,7 +467,11 @@ an OpenShift Container Platform cluster
######################################################################
# Check etcd certs
+ #
+ # Two things to check: 'external' etcd, and embedded etcd.
######################################################################
+ # FIRST: The 'external' etcd
+ #
# Some values may be duplicated, make this a set for now so we
# unique them all
etcd_certs_to_check = set([])
@@ -506,6 +510,43 @@ an OpenShift Container Platform cluster
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
######################################################################
+ # Now the embedded etcd
+ ######################################################################
+ try:
+ with open('/etc/origin/master/master-config.yaml', 'r') as fp:
+ cfg = yaml.load(fp)
+ except IOError:
+ # Not present
+ pass
+ else:
+ if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None:
+ # This is embedded
+ etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile']
+ else:
+ # Not embedded
+ etcd_crt_name = None
+
+ if etcd_crt_name is not None:
+ # etcd_crt_name is relative to the location of the
+ # master-config.yaml file
+ cfg_path = os.path.dirname(fp.name)
+ etcd_cert = os.path.join(cfg_path, etcd_crt_name)
+ with open(etcd_cert, 'r') as etcd_fp:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining) = load_and_handle_cert(etcd_fp.read(), now)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': etcd_fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
# /Check etcd certs
######################################################################
@@ -523,7 +564,7 @@ an OpenShift Container Platform cluster
######################################################################
# First the router certs
try:
- router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(),
+ router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
stdout=subprocess.PIPE)
router_ds = yaml.load(router_secrets_raw.communicate()[0])
router_c = router_ds['data']['tls.crt']
@@ -552,7 +593,7 @@ an OpenShift Container Platform cluster
######################################################################
# Now for registry
try:
- registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(),
+ registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
stdout=subprocess.PIPE)
registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
registry_c = registry_ds['data']['registry.crt']
@@ -613,9 +654,13 @@ an OpenShift Container Platform cluster
# will be at the front of the list and certificates which will
# expire later are at the end. Router and registry certs should be
# limited to just 1 result, so don't bother sorting those.
- check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
+ def cert_key(item):
+ ''' return the days_remaining key '''
+ return item['days_remaining']
+
+ check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
+ check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
+ check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
# This module will never change anything, but we might want to
# change the return code parameter if there is some catastrophic
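
The sorting change above replaces the Python-2-only cmp= argument with key=, which works on both interpreters. A minimal illustration with made-up records:

certs = [
    {'cert_cn': 'CN:etcd-peer', 'days_remaining': 120},
    {'cert_cn': 'CN:system:admin', 'days_remaining': 12},
]

# Soonest-to-expire certificates sort to the front, matching the module's intent.
soonest_first = sorted(certs, key=lambda item: item['days_remaining'])
# [{'cert_cn': 'CN:system:admin', ...}, {'cert_cn': 'CN:etcd-peer', ...}]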
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
new file mode 100644
index 000000000..14bdd1dca
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cloudforms
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes-app
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
new file mode 100644
index 000000000..709d8d976
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nfs-pv01
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
new file mode 100644
index 000000000..c8e3d4083
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
@@ -0,0 +1,479 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: "CloudForms appliance with persistent storage"
+ tags: "instant-app,cloudforms,cfme"
+ iconClass: "icon-rails"
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances CloudForms pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-app
+ annotations:
+ description: "Keeps track of changes in the CloudForms app image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${DATABASE_SERVICE_NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the CloudForms appliance"
+ spec:
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ volumes:
+ -
+ name: "cfme-app-volume"
+ persistentVolumeClaim:
+ claimName: ${NAME}
+ containers:
+ - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
+ imagePullPolicy: IfNotPresent
+ name: cloudforms
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "cfme-app-volume"
+ mountPath: "/persistent"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${MEMORY_APPLICATION_MIN}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/rh/cfme-container-scripts/sync-pv-data
+ replicas: 1
+ selector:
+ name: ${NAME}
+ triggers:
+ - type: "ConfigChange"
+ - type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "cloudforms"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
+ strategy:
+ type: "Recreate"
+ recreateParams:
+ timeoutSeconds: 1200
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ limits:
+ memory: "${MEMORY_MEMCACHED_LIMIT}"
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "cfme-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: ${DATABASE_SERVICE_NAME}
+ containers:
+ -
+ name: "postgresql"
+ image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "cfme-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ limits:
+ memory: "${MEMORY_POSTGRESQL_LIMIT}"
+
+parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: cloudforms
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ value: "smartvm"
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "64MB"
+ -
+ name: "MEMORY_APPLICATION_MIN"
+ displayName: "Application Memory Minimum"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "4096Mi"
+ -
+ name: "MEMORY_POSTGRESQL_LIMIT"
+ displayName: "PostgreSQL Memory Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can use."
+ value: "2048Mi"
+ -
+ name: "MEMORY_MEMCACHED_LIMIT"
+ displayName: "Memcached Memory Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can use."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "30"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "1Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "1Gi"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 41ae07a48..d7e3596fd 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -26,6 +26,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
+from six import string_types
+from six import text_type
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -87,7 +89,7 @@ def migrate_docker_facts(facts):
# log_options was originally meant to be a comma separated string, but
# we now prefer an actual list, with backward compatibility:
if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], basestring):
+ isinstance(facts['docker']['log_options'], string_types):
facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
return facts
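The `basestring` builtin does not exist on Python 3, hence the switch to `six.string_types`; the hunk also keeps accepting `log_options` as a legacy comma-separated string. A minimal sketch of that pattern in isolation (hypothetical helper name):

```
# Sketch of the string_types / backward-compatibility pattern used above.
from six import string_types

def migrate_log_options(docker_facts):
    """Accept log_options as either a list or a legacy comma-separated string."""
    opts = docker_facts.get('log_options')
    if isinstance(opts, string_types):
        docker_facts['log_options'] = opts.split(',')
    return docker_facts

print(migrate_log_options({'log_options': 'max-size=50m,max-file=5'}))
# {'log_options': ['max-size=50m', 'max-file=5']}
```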
@@ -226,7 +228,7 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)]
+ ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
@@ -363,7 +365,7 @@ def normalize_aws_facts(metadata, facts):
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
- if isinstance(ips, basestring):
+ if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
@@ -772,7 +774,7 @@ def set_etcd_facts_if_unset(facts):
# Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
try:
# Add a fake section for parsing:
- ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
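`unicode()` likewise does not exist on Python 3, so the file content is wrapped with `six.text_type` before being parsed; the prepended `[root]` header is only there because `/etc/etcd/etcd.conf` is a sectionless key=value file that `ConfigParser` would otherwise reject. A small sketch of the same trick, using a hypothetical sample string and Python 3's `read_string` in place of the `io.StringIO`/`readfp` pair:

```
# Sketch of the fake-[root]-section trick used above for sectionless
# key=value files such as /etc/etcd/etcd.conf (hypothetical sample string).
import configparser

sample = 'ETCD_DATA_DIR="/var/lib/etcd/"\nETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"\n'

config = configparser.RawConfigParser()
config.read_string('[root]\n' + sample)  # prepend a section header so the parser accepts it

print(config.get('root', 'ETCD_DATA_DIR'))  # "/var/lib/etcd/"  (quotes kept as in the file)
```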
@@ -894,23 +896,31 @@ def set_version_facts_if_unset(facts):
version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
version_gte_3_4_or_1_4 = False
+ version_gte_3_5_or_1_5 = False
+ version_gte_3_6_or_1_6 = False
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
+ facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
+ facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
if version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
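The new `version_gte_3_5_or_1_5` and `version_gte_3_6_or_1_6` facts follow the same `LooseVersion` comparisons as the existing flags. A minimal sketch for the origin branch (hypothetical helper, illustrative values only):

```
# Sketch of the LooseVersion comparisons behind the version_gte_* facts above.
from distutils.version import LooseVersion

def origin_version_flags(version):
    v = LooseVersion(version)
    return {
        'version_gte_3_4_or_1_4': v >= LooseVersion('1.4.0'),
        'version_gte_3_5_or_1_5': v >= LooseVersion('1.5.0'),
        'version_gte_3_6_or_1_6': v >= LooseVersion('1.6.0'),
    }

print(origin_version_flags('1.5.1'))
# {'version_gte_3_4_or_1_4': True, 'version_gte_3_5_or_1_5': True, 'version_gte_3_6_or_1_6': False}
```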
@@ -1280,15 +1290,14 @@ def get_hosted_registry_insecure():
hosted_registry_insecure = None
if os.path.exists('/etc/sysconfig/docker'):
try:
- ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
hosted_registry_insecure = True
- # pylint: disable=bare-except
- except:
+ except Exception: # pylint: disable=broad-except
pass
return hosted_registry_insecure
@@ -1449,7 +1458,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], basestring):
+ if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1511,7 +1520,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
for key in new_keys:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if key in inventory_json_facts and isinstance(new[key], basestring):
+ if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
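Inventory JSON facts can arrive as plain strings (for example when they contain a boolean), which is why string values are re-parsed with `yaml.safe_load` before being merged. A small sketch of that normalization, with hypothetical input:

```
# Sketch of why string-typed inventory JSON facts are re-parsed above.
from six import string_types
import yaml

def normalize_fact(value):
    """Return structured data whether the fact arrived parsed or as a raw string."""
    if isinstance(value, string_types):
        return yaml.safe_load(value)  # YAML is a superset of JSON, so this handles both
    return value

print(normalize_fact('{"insecure": true, "port": 5000}'))  # {'insecure': True, 'port': 5000}
print(normalize_fact({'insecure': True, 'port': 5000}))    # passed through unchanged
```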
@@ -1614,7 +1623,7 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
@@ -1636,7 +1645,7 @@ def set_proxy_facts(facts):
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
# make no_proxy into a list if it's not
- if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
+ if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], string_types):
builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
@@ -2220,12 +2229,12 @@ class OpenShiftFacts(object):
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], basestring):
+ isinstance(new_local_facts['docker']['log_options'], string_types):
new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
new_local_facts = self.remove_empty_facts(new_local_facts)
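For context, the `*_registries` values may be supplied either as a list or as a comma-separated string; the code above splits, strips, and drops empty entries with a set difference. A minimal sketch (hypothetical helper name; note that the set conversion does not preserve order):

```
# Sketch of the registry-list normalization above: accept a string or a list,
# strip whitespace, and drop empty entries.
from six import string_types

def normalize_registries(val):
    if isinstance(val, string_types):
        val = [x.strip() for x in val.split(',')]
    return list(set(val) - set(['']))

print(sorted(normalize_registries('registry.example.com, ,mirror.example.com,')))
# ['mirror.example.com', 'registry.example.com']
```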
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 70cf49dd4..b7b521f1a 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -10,11 +10,9 @@
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-- name: Ensure PyYaml and yum-utils are installed
+- name: Ensure various deps are installed
package: name={{ item }} state=present
- with_items:
- - PyYAML
- - yum-utils
+ with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
- name: Gather Cluster facts and set is_containerized if needed
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
new file mode 100644
index 000000000..9c3110ff6
--- /dev/null
+++ b/roles/openshift_facts/vars/main.yml
@@ -0,0 +1,7 @@
+---
+required_packages:
+ - iproute
+ - python-dbus
+ - python-six
+ - PyYAML
+ - yum-utils
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index b6d007835..d87a3847c 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -34,9 +34,9 @@
- name: Create registry certificates if they do not exist
command: >
{{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert=/etc/origin/master/ca.crt
- --signer-key=/etc/origin/master/ca.key
- --signer-serial=/etc/origin/master/ca.serial.txt
+ --signer-cert={{ openshift_master_config_dir }}/ca.crt
+ --signer-key={{ openshift_master_config_dir }}/ca.key
+ --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
--hostnames="{{ docker_registry_service_ip.stdout }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift_master_config_dir }}/registry.crt
--key={{ openshift_master_config_dir }}/registry.key
@@ -65,12 +65,12 @@
- name: Determine if registry-certificates secret volume attached
command: >
{{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.volumes[*].secret.secretName}'
+ -o jsonpath='{.spec.template.spec.volumes[?(@.secret)].secret.secretName}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_volumes
changed_when: false
- failed_when: "'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
+ failed_when: "docker_registry_volumes.stdout != '' and 'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
- name: Attach registry-certificates secret volume
command: >
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
index 13cef2d66..c47d5361d 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
@@ -72,7 +72,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -83,7 +82,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index ddfda1272..c67058696 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -81,7 +81,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -92,7 +91,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
@@ -103,7 +101,6 @@ items:
metadata:
name: logging-elasticsearch-view-role
roleRef:
- kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index 4f7461827..b0984b004 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,10 +40,10 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
- if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 7087ff03c..4d6572dae 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -45,10 +45,10 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
- if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index c95356908..07bac6826 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -65,7 +65,11 @@ TEST_VARS = [
('1.3', 'origin', DEFAULT_PREDICATES_1_3),
('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
('1.4', 'origin', DEFAULT_PREDICATES_1_4),
- ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4)
+ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.5', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.6', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
]
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index d1920c485..616f44c1d 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -43,7 +43,7 @@ Currently we support re-labeling nodes but we don't re-schedule running pods nor
```
oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --evacuate ${NODE}
+oadm manage-node --drain ${NODE}
oadm manage-node --schedulable=true ${NODE}
```
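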
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d5ed9c09d..23dcd0440 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -37,7 +37,7 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
- name: Configure origin yum repositories RHEL/CentOS
copy:
@@ -47,4 +47,4 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 2d9243545..ef2cd6603 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -2,9 +2,9 @@
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default('1') %}
+{% set enable_repo = repo.enabled | default(1) %}
enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default('1') %}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
{% for key, value in repo.iteritems() %}
{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
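The `default('1')` to `default(1)` change matters because the rendered expression compares against the integer `1`: a repo entry that omits `enabled` (or `gpgcheck`) would previously fall back to the string `'1'`, fail the comparison, and be written out disabled. A small sketch with the `jinja2` package and a hypothetical `repo` entry:

```
# Sketch of the default('1') vs default(1) difference fixed in yum_repo.j2 above.
from jinja2 import Template

OLD = "{% set e = repo.enabled | default('1') %}enabled={{ 1 if (e == 1 or e == True) else 0 }}"
NEW = "{% set e = repo.enabled | default(1) %}enabled={{ 1 if (e == 1 or e == True) else 0 }}"

repo = {}  # inventory repo entry that does not set 'enabled'
print(Template(OLD).render(repo=repo))  # enabled=0  (the string '1' never compares equal to 1)
print(Template(NEW).render(repo=repo))  # enabled=1
```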
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 8b8471745..cc674d3fd 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -48,6 +48,13 @@ osnl_volume_num_start: 3
# How many volumes/partitions to build, with the size we stated.
osnl_number_of_volumes: 2
+# osnl_volume_reclaim_policy
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle" (default).
+osnl_volume_reclaim_policy: "Recycle"
+
```
## Dependencies
@@ -71,6 +78,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
## Full example
@@ -96,6 +104,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
* Run the playbook:
```
diff --git a/roles/openshift_storage_nfs_lvm/defaults/main.yml b/roles/openshift_storage_nfs_lvm/defaults/main.yml
index f81cdc724..48352187c 100644
--- a/roles/openshift_storage_nfs_lvm/defaults/main.yml
+++ b/roles/openshift_storage_nfs_lvm/defaults/main.yml
@@ -8,3 +8,10 @@ osnl_mount_dir: /exports/openshift
# Volume Group to use.
osnl_volume_group: openshiftvg
+
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle".
+# See https://docs.openshift.com/enterprise/3.0/architecture/additional_concepts/storage.html#pv-recycling-policy
+osnl_volume_reclaim_policy: "Recycle"
diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
index 3c4d2f56c..19e150f7d 100644
--- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
+++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
@@ -12,7 +12,7 @@
"storage": "{{ osnl_volume_size }}Gi"
},
"accessModes": [ "ReadWriteOnce", "ReadWriteMany" ],
- "persistentVolumeReclaimPolicy": "Recycle",
+ "persistentVolumeReclaimPolicy": "{{ osnl_volume_reclaim_policy }}",
"nfs": {
"Server": "{{ inventory_hostname }}",
"Path": "{{ osnl_mount_dir }}/{{ item }}"
diff --git a/utils/.coveragerc b/utils/.coveragerc
index 0c870af42..e1d918755 100644
--- a/utils/.coveragerc
+++ b/utils/.coveragerc
@@ -1,4 +1,5 @@
[run]
omit=
*/lib/python*/site-packages/*
+ */lib/python*/*
/usr/*
diff --git a/utils/Makefile b/utils/Makefile
index c061edd8c..0e1cd79dd 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -33,8 +33,8 @@ MANPAGES := docs/man/man1/atomic-openshift-installer.1
VERSION := 1.3
# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
-YAMLFILES = $(shell find ../ -name $(VENV) -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" 2>&1)
-PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print)
+YAMLFILES = $(shell find ../ -name $(VENV) -prune -o -name .tox -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" -print 2>&1)
+PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name .tox -prune -o -name "*.py" -print)
sdist: clean
python setup.py sdist
@@ -83,7 +83,8 @@ ci-unittests: $(VENV)
@echo "#############################################"
@echo "# Running Unit Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && python setup.py nosetests
+ . $(VENV)/bin/activate && tox -e py27-unit
+ . $(VENV)/bin/activate && tox -e py35-unit
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
ci-pylint: $(VENV)
@@ -108,12 +109,15 @@ ci-flake8: $(VENV)
@echo "#############################################"
@echo "# Running Flake8 Compliance Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && flake8 --config=setup.cfg ../ --exclude="utils,../inventory"
- . $(VENV)/bin/activate && python setup.py flake8
+ . $(VENV)/bin/activate && tox -e py27-flake8
+ . $(VENV)/bin/activate && tox -e py35-flake8
-ci: ci-list-deps ci-unittests ci-flake8 ci-pylint ci-yamllint
+ci-tox:
+ . $(VENV)/bin/activate && tox
+
+ci: ci-list-deps ci-tox ci-pylint ci-yamllint
@echo
@echo "##################################################################################"
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
@echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
+ @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
diff --git a/utils/setup.cfg b/utils/setup.cfg
index ee3288fc5..ea07eea9f 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -17,5 +17,5 @@ cover-branches=1
[flake8]
max-line-length=120
-exclude=tests/*,setup.py
-ignore=E501,E121,E124
+exclude=test/*,setup.py,oo-installenv
+ignore=E501
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 7e5ad4144..b70bd1817 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -501,7 +501,7 @@ def get_variant_and_version(multi_master=False):
i = 1
combos = get_variant_version_combos()
- for (variant, version) in combos:
+ for (variant, _) in combos:
message = "%s\n(%s) %s" % (message, i, variant.description)
i = i + 1
message = "%s\n" % message
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 64eb340f3..cf14105af 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -1,5 +1,7 @@
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
+from __future__ import (absolute_import, print_function)
+
import os
import sys
import logging
@@ -50,7 +52,7 @@ Error loading config. {}.
See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
for information on creating a configuration file or delete {} and re-run the installer.
"""
- print message.format(error, path)
+ print(message.format(error, path))
class OOConfigFileError(Exception):
@@ -103,7 +105,7 @@ class Host(object):
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
- for variable, value in self.other_variables.iteritems():
+ for variable, value in self.other_variables.items():
d[variable] = value
return d
@@ -256,16 +258,16 @@ class OOConfig(object):
# Parse the hosts into DTO objects:
for host in host_list:
host['other_variables'] = {}
- for variable, value in host.iteritems():
+ for variable, value in host.items():
if variable not in HOST_VARIABLES_BLACKLIST:
host['other_variables'][variable] = value
self.deployment.hosts.append(Host(**host))
# Parse the roles into Objects
- for name, variables in role_list.iteritems():
+ for name, variables in role_list.items():
self.deployment.roles.update({name: Role(name, variables)})
- except IOError, ferr:
+ except IOError as ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
@@ -354,14 +356,13 @@ class OOConfig(object):
self.settings['ansible_inventory_path'] = \
'{}/hosts'.format(os.path.dirname(self.config_path))
- # pylint: disable=consider-iterating-dictionary
- # Disabled because we shouldn't alter the container we're
- # iterating over
- #
# clean up any empty sets
- for setting in self.settings.keys():
+ empty_keys = []
+ for setting in self.settings:
if not self.settings[setting]:
- self.settings.pop(setting)
+ empty_keys.append(setting)
+ for key in empty_keys:
+ self.settings.pop(key)
installer_log.debug("Updated OOConfig settings: %s", self.settings)
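The rewritten cleanup is needed because Python 3's `dict.keys()` is a live view, so popping entries while iterating over it raises `RuntimeError`; collecting the empty keys first and removing them in a second pass is safe on both interpreters. A minimal sketch with hypothetical settings:

```
# Sketch of the two-pass cleanup above: collect empty keys first, then pop them.
# Mutating a dict while iterating over it is an error on Python 3.
settings = {'ansible_ssh_user': 'root', 'ansible_log_path': '', 'unattended': None}

empty_keys = [k for k, v in settings.items() if not v]  # materialized list, safe to iterate
for key in empty_keys:
    settings.pop(key)

print(settings)  # {'ansible_ssh_user': 'root'}
```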
@@ -410,7 +411,7 @@ class OOConfig(object):
for host in self.deployment.hosts:
p_settings['deployment']['hosts'].append(host.to_dict())
- for name, role in self.deployment.roles.iteritems():
+ for name, role in self.deployment.roles.items():
p_settings['deployment']['roles'][name] = role.variables
for setting in self.deployment.variables:
@@ -424,7 +425,7 @@ class OOConfig(object):
if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
except KeyError as e:
- print "Error persisting settings: {}".format(e)
+ print("Error persisting settings: {}".format(e))
sys.exit(0)
return p_settings
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index f542fb376..ce6e54664 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -1,5 +1,7 @@
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
+from __future__ import (absolute_import, print_function)
+
import socket
import subprocess
import sys
@@ -107,12 +109,12 @@ def write_inventory_vars(base_inventory, lb):
global CFG
base_inventory.write('\n[OSEv3:vars]\n')
- for variable, value in CFG.settings.iteritems():
+ for variable, value in CFG.settings.items():
inventory_var = VARIABLES_MAP.get(variable, None)
if inventory_var and value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
- for variable, value in CFG.deployment.variables.iteritems():
+ for variable, value in CFG.deployment.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
@@ -152,11 +154,11 @@ def write_inventory_vars(base_inventory, lb):
"'baseurl': '{}', "
"'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
- for name, role_obj in CFG.deployment.roles.iteritems():
+ for name, role_obj in CFG.deployment.roles.items():
if role_obj.variables:
group_name = ROLES_TO_GROUPS_MAP.get(name, name)
base_inventory.write("\n[{}:vars]\n".format(group_name))
- for variable, value in role_obj.variables.iteritems():
+ for variable, value in role_obj.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
@@ -193,7 +195,7 @@ def write_host(host, role, inventory, schedulable=None):
facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
if host.other_variables:
- for variable, value in host.other_variables.iteritems():
+ for variable, value in host.other_variables.items():
facts += " {}={}".format(variable, value)
if host.node_labels and role == 'node':
@@ -210,9 +212,9 @@ def write_host(host, role, inventory, schedulable=None):
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
- no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n'])
if no_pwd_sudo == 1:
- print 'The atomic-openshift-installer requires sudo access without a password.'
+ print('The atomic-openshift-installer requires sudo access without a password.')
sys.exit(1)
facts += ' ansible_become=yes'
@@ -245,9 +247,9 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
try:
callback_facts = yaml.safe_load(callback_facts_file)
- except yaml.YAMLError, exc:
- print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
- print "Try deleting and rerunning the atomic-openshift-installer"
+ except yaml.YAMLError as exc:
+ print("Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc)
+ print("Try deleting and rerunning the atomic-openshift-installer")
sys.exit(1)
return callback_facts, 0
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 39772bb2e..a45be98bf 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -38,32 +38,24 @@ class Variant(object):
# WARNING: Keep the versions ordered, most recent first:
-OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
- [
- Version('3.4', 'openshift-enterprise'),
- ]
-)
-
-REG = Variant('openshift-enterprise', 'Registry',
- [
- Version('3.4', 'openshift-enterprise', 'registry'),
- ]
-)
-
-origin = Variant('origin', 'OpenShift Origin',
- [
- Version('1.4', 'origin'),
- ]
-)
-
-LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform',
- [
- Version('3.3', 'openshift-enterprise'),
- Version('3.2', 'openshift-enterprise'),
- Version('3.1', 'openshift-enterprise'),
- Version('3.0', 'openshift-enterprise'),
- ]
-)
+OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.4', 'openshift-enterprise'),
+])
+
+REG = Variant('openshift-enterprise', 'Registry', [
+ Version('3.4', 'openshift-enterprise', 'registry'),
+])
+
+origin = Variant('origin', 'OpenShift Origin', [
+ Version('1.4', 'origin'),
+])
+
+LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.3', 'openshift-enterprise'),
+ Version('3.2', 'openshift-enterprise'),
+ Version('3.1', 'openshift-enterprise'),
+ Version('3.0', 'openshift-enterprise'),
+])
# Ordered list of variants we can install, first is the default.
SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY)
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
index b70a03563..e5c5360c3 100644
--- a/utils/test-requirements.txt
+++ b/utils/test-requirements.txt
@@ -1,5 +1,4 @@
ansible
-enum
configparser
pylint
nose
@@ -11,3 +10,4 @@ click
backports.functools_lru_cache
pyOpenSSL
yamllint
+tox
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 36dc18034..0cb37eaff 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -4,7 +4,8 @@
import copy
import os
-import ConfigParser
+
+from six.moves import configparser
import ooinstall.cli_installer as cli
@@ -408,7 +409,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
if result.exception is None or result.exit_code != 1:
- print "Exit code: %s" % result.exit_code
+ print("Exit code: %s" % result.exit_code)
self.fail("Unexpected CLI return")
# unattended with config file and all installed hosts (with --force)
@@ -523,7 +524,7 @@ class UnattendedCliTests(OOCliFixture):
self.assert_result(result, 0)
# Check the inventory file looks as we would expect:
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('root',
inventory.get('OSEv3:vars', 'ansible_ssh_user'))
@@ -566,7 +567,7 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals('3.3', written_config['variant_version'])
# Make sure the correct value was passed to ansible:
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
@@ -594,7 +595,7 @@ class UnattendedCliTests(OOCliFixture):
# and written to disk:
self.assertEquals('3.3', written_config['variant_version'])
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
@@ -830,7 +831,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=False')
@@ -949,7 +950,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 6)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=False')
@@ -990,7 +991,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 5)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=True')
@@ -1082,7 +1083,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 1)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=True')
@@ -1116,7 +1117,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=False')
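The test changes swap the Python 2-only `ConfigParser` module for `six.moves.configparser`, which resolves to `configparser` on Python 3. A small sketch of reading an inventory-style INI the way these assertions do, with a hypothetical in-memory inventory instead of the generated `hosts` file:

```
# Sketch of the configparser usage in the tests above (six.moves.configparser
# resolves to this module on Python 3); the inventory string is hypothetical.
import configparser

INVENTORY = """\
[OSEv3:vars]
ansible_ssh_user=root
deployment_type=openshift-enterprise
"""

inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read_string(INVENTORY)

print(inventory.get('OSEv3:vars', 'ansible_ssh_user'))  # root
print(inventory.get('OSEv3:vars', 'deployment_type'))   # openshift-enterprise
```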
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 62135c761..5200d275d 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -65,13 +65,13 @@ class OOCliFixture(OOInstallFixture):
def assert_result(self, result, exit_code):
if result.exit_code != exit_code:
- print "Unexpected result from CLI execution"
- print "Exit code: %s" % result.exit_code
- print "Exception: %s" % result.exception
- print result.exc_info
+ print("Unexpected result from CLI execution")
+ print("Exit code: %s" % result.exit_code)
+ print("Exception: %s" % result.exception)
+ print(result.exc_info)
import traceback
traceback.print_exception(*result.exc_info)
- print "Output:\n%s" % result.output
+ print("Output:\n%s" % result.output)
self.fail("Exception during CLI execution")
def _verify_load_facts(self, load_facts_mock):
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 56fd82408..2b4fce512 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -2,13 +2,14 @@
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
-import cStringIO
import os
import unittest
import tempfile
import shutil
import yaml
+from six.moves import cStringIO
+
from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
import ooinstall.openshift_ansible
@@ -244,7 +245,7 @@ class HostTests(OOInstallFixture):
}
new_node = Host(**yaml_props)
- inventory = cStringIO.StringIO()
+ inventory = cStringIO()
# This is what the 'write_host' function generates. write_host
# has no return value, it just writes directly to the file
# 'inventory' which in this test-case is a StringIO object
@@ -285,7 +286,7 @@ class HostTests(OOInstallFixture):
# }
# new_node = Host(**yaml_props)
- # inventory = cStringIO.StringIO()
+ # inventory = cStringIO()
# # This is what the original 'write_host' function will
# # generate. write_host has no return value, it just writes
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index 2e59d86f2..b18f85692 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -2,6 +2,7 @@
Unittests for ooinstall utils.
"""
+import six
import unittest
import logging
import sys
@@ -28,9 +29,6 @@ class TestUtils(unittest.TestCase):
mock.call('OO_FOO: bar'),
]
- # python 2.x has assertItemsEqual, python 3.x has assertCountEqual
- if sys.version_info.major > 3:
- self.assertItemsEqual = self.assertCountEqual
######################################################################
# Validate ooinstall.utils.debug_env functionality
@@ -40,7 +38,7 @@ class TestUtils(unittest.TestCase):
with mock.patch('ooinstall.utils.installer_log') as _il:
debug_env(self.debug_all_params)
- print _il.debug.call_args_list
+ print(_il.debug.call_args_list)
# Debug was called for each item we expect
self.assertEqual(
@@ -48,7 +46,8 @@ class TestUtils(unittest.TestCase):
_il.debug.call_count)
# Each item we expect was logged
- self.assertItemsEqual(
+ six.assertCountEqual(
+ self,
self.expected,
_il.debug.call_args_list)
@@ -67,7 +66,8 @@ class TestUtils(unittest.TestCase):
_il.debug.call_count,
len(debug_some_params))
- self.assertItemsEqual(
+ six.assertCountEqual(
+ self,
self.expected,
_il.debug.call_args_list)
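`assertItemsEqual` exists only on Python 2 and `assertCountEqual` only on Python 3; `six.assertCountEqual(self, a, b)` dispatches to whichever the running interpreter provides, which is why the version check above could be dropped. A minimal sketch:

```
# Sketch of the six.assertCountEqual helper used above: it calls
# assertItemsEqual on Python 2 and assertCountEqual on Python 3.
import unittest

import six

class OrderInsensitiveTest(unittest.TestCase):
    def test_same_items_any_order(self):
        expected = ['OO_FOO: bar', 'OO_BAZ: qux']
        actual = ['OO_BAZ: qux', 'OO_FOO: bar']
        six.assertCountEqual(self, expected, actual)  # passes regardless of order

if __name__ == '__main__':
    unittest.main()
```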
diff --git a/utils/tox.ini b/utils/tox.ini
new file mode 100644
index 000000000..747d79dfe
--- /dev/null
+++ b/utils/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+minversion=2.3.1
+envlist =
+ py{27,35}-{flake8,unit}
+skipsdist=True
+skip_missing_interpreters=True
+
+[testenv]
+usedevelop=True
+deps =
+ -rtest-requirements.txt
+ py35-flake8: flake8-bugbear
+
+commands =
+ flake8: flake8 --config=setup.cfg ../ --exclude="../utils,.tox,../inventory"
+ flake8: python setup.py flake8
+ unit: python setup.py nosetests