Diffstat (limited to 'roles/openshift_facts')
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  610
-rw-r--r--  roles/openshift_facts/tasks/main.yml                12
-rw-r--r--  roles/openshift_facts/vars/main.yml                  7
3 files changed, 441 insertions, 188 deletions
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index e94cb0952..10121f82a 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -7,18 +7,39 @@
"""Ansible module for retrieving and setting openshift related facts"""
-import ConfigParser
+# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
+import errno
+import json
+import re
import io
import os
import yaml
-from distutils.util import strtobool
-from distutils.version import LooseVersion
import struct
import socket
-from dbus import SystemBus, Interface
-from dbus.exceptions import DBusException
+from distutils.util import strtobool
+from distutils.version import LooseVersion
+from six import string_types, text_type
+from six.moves import configparser
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import * # noqa: F403
+from ansible.module_utils.facts import * # noqa: F403
+from ansible.module_utils.urls import * # noqa: F403
+from ansible.module_utils.six import iteritems, itervalues
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+from ansible.module_utils._text import to_native
+
+HAVE_DBUS = False
+
+try:
+ from dbus import SystemBus, Interface
+ from dbus.exceptions import DBusException
+ HAVE_DBUS = True
+except ImportError:
+ pass
DOCUMENTATION = '''
---
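The dbus import is now wrapped in a guard so the module can still be imported on hosts without the python-dbus bindings, and main() can fail with a clear message instead of an ImportError traceback (see the HAVE_DBUS check near the end of this diff). A minimal, self-contained sketch of the same pattern, illustrative only:

    # Guarded optional import: record availability rather than crash at import time.
    HAVE_DBUS = False
    try:
        import dbus  # noqa: F401
        HAVE_DBUS = True
    except ImportError:
        pass

    def main():
        if not HAVE_DBUS:
            # Fail fast, once we are in a position to report errors properly.
            raise SystemExit("This module requires dbus python bindings")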
@@ -47,6 +68,7 @@ def migrate_docker_facts(facts):
}
if 'docker' not in facts:
facts['docker'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -55,17 +77,17 @@ def migrate_docker_facts(facts):
facts['docker'][param] = facts[role].pop(old_param)
if 'node' in facts and 'portal_net' in facts['node']:
- facts['docker']['hosted_registry_insecure'] = True
facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
# log_options was originally meant to be a comma separated string, but
- # we now prefer an actual list, with backward compatability:
+ # we now prefer an actual list, with backward compatibility:
if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], basestring):
+ isinstance(facts['docker']['log_options'], string_types):
facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
return facts
+
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
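The log_options handling above keeps backward compatibility: the fact was originally a comma-separated string, but a real list is now preferred. The normalization reduces to this sketch (illustrative, not part of the commit):

    def normalize_log_options(value):
        # Accept "max-size=50m,max-file=5" or ["max-size=50m", "max-file=5"].
        if isinstance(value, str):
            return value.split(",")
        return value

    assert normalize_log_options("max-size=50m,max-file=5") == ["max-size=50m", "max-file=5"]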
@@ -76,6 +98,7 @@ def migrate_common_facts(facts):
}
if 'common' not in facts:
facts['common'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -83,6 +106,7 @@ def migrate_common_facts(facts):
facts['common'][param] = facts[role].pop(param)
return facts
+
def migrate_node_facts(facts):
""" Migrate facts from various roles into node """
params = {
@@ -90,6 +114,7 @@ def migrate_node_facts(facts):
}
if 'node' not in facts:
facts['node'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -97,14 +122,6 @@ def migrate_node_facts(facts):
facts['node'][param] = facts[role].pop(param)
return facts
-def migrate_local_facts(facts):
- """ Apply migrations of local facts """
- migrated_facts = copy.deepcopy(facts)
- migrated_facts = migrate_docker_facts(migrated_facts)
- migrated_facts = migrate_common_facts(migrated_facts)
- migrated_facts = migrate_node_facts(migrated_facts)
- migrated_facts = migrate_hosted_facts(migrated_facts)
- return migrated_facts
def migrate_hosted_facts(facts):
""" Apply migrations for master facts """
@@ -123,6 +140,34 @@ def migrate_hosted_facts(facts):
facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
return facts
+
+def migrate_admission_plugin_facts(facts):
+ """ Apply migrations for admission plugin facts """
+ if 'master' in facts:
+ if 'kube_admission_plugin_config' in facts['master']:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ # Merge existing kube_admission_plugin_config with admission_plugin_config.
+ facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
+ facts['master']['kube_admission_plugin_config'],
+ additive_facts_to_overwrite=[],
+ protected_facts_to_overwrite=[])
+ # Remove kube_admission_plugin_config fact
+ facts['master'].pop('kube_admission_plugin_config', None)
+ return facts
+
+
+def migrate_local_facts(facts):
+ """ Apply migrations of local facts """
+ migrated_facts = copy.deepcopy(facts)
+ migrated_facts = migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_common_facts(migrated_facts)
+ migrated_facts = migrate_node_facts(migrated_facts)
+ migrated_facts = migrate_hosted_facts(migrated_facts)
+ migrated_facts = migrate_admission_plugin_facts(migrated_facts)
+ return migrated_facts
+
+
def first_ip(network):
""" Return the first IPv4 address in network
@@ -131,13 +176,14 @@ def first_ip(network):
Returns:
str: first IPv4 address
"""
- atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
- itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
+ itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
(address, netmask) = network.split('/')
netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
return itoa((atoi(address) & netmask_i) + 1)
+
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -175,11 +221,8 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames
- if (i is not None and isinstance(i, basestring)
- and re.match(ip_regex, i))]
- hosts = [i for i in hostnames
- if i is not None and i != '' and i not in ips]
+ ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
+ hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
for host in host_list:
@@ -199,13 +242,13 @@ def query_metadata(metadata_url, headers=None, expect_json=False):
Returns:
dict or list: metadata request result
"""
- result, info = fetch_url(module, metadata_url, headers=headers)
+ result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
if info['status'] != 200:
raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
- return module.from_json(result.read())
+ return module.from_json(to_native(result.read())) # noqa: F405
else:
- return [line.strip() for line in result.readlines()]
+ return [to_native(line.strip()) for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
@@ -313,9 +356,9 @@ def normalize_aws_facts(metadata, facts):
):
int_info = dict()
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
- for ips_var, int_var in var_map.iteritems():
+ for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
- if isinstance(ips, basestring):
+ if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
@@ -364,12 +407,15 @@ def normalize_openstack_facts(metadata, facts):
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
- # TODO: verify local hostname makes sense and is resolvable
- facts['network']['hostname'] = metadata['hostname']
-
- # TODO: verify that public hostname makes sense and is resolvable
- pub_h = metadata['ec2_compat']['public-hostname']
- facts['network']['public_hostname'] = pub_h
+ for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
+ ('public_hostname', 'public-hostname', 'public-ipv4')]:
+ try:
+ if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
+ facts['network'][f_var] = metadata['ec2_compat'][h_var]
+ else:
+ facts['network'][f_var] = metadata['ec2_compat'][ip_var]
+ except socket.gaierror:
+ facts['network'][f_var] = metadata['ec2_compat'][ip_var]
return facts
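Rather than trusting the metadata hostnames blindly (the old TODOs), the OpenStack normalization now checks that each hostname resolves back to the expected address and falls back to the raw IP otherwise. The core pattern, as a standalone sketch:

    import socket

    def resolvable_or_ip(hostname, expected_ip):
        # Use the hostname only if DNS agrees with the metadata-provided IP.
        try:
            if socket.gethostbyname(hostname) == expected_ip:
                return hostname
        except socket.gaierror:
            pass
        return expected_ip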
@@ -402,6 +448,7 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
+
def set_flannel_facts_if_unset(facts):
""" Set flannel facts if not already present in facts dict
dict: the facts dict updated with the flannel facts if
@@ -419,6 +466,7 @@ def set_flannel_facts_if_unset(facts):
facts['common']['use_flannel'] = use_flannel
return facts
+
def set_nuage_facts_if_unset(facts):
""" Set nuage facts if not already present in facts dict
dict: the facts dict updated with the nuage facts if
@@ -436,6 +484,7 @@ def set_nuage_facts_if_unset(facts):
facts['common']['use_nuage'] = use_nuage
return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -453,6 +502,7 @@ def set_node_schedulability(facts):
facts['node']['schedulable'] = True
return facts
+
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -489,6 +539,7 @@ def set_selectors(facts):
return facts
+
def set_dnsmasq_facts_if_unset(facts):
""" Set dnsmasq facts if not already present in facts
Args:
@@ -498,8 +549,8 @@ def set_dnsmasq_facts_if_unset(facts):
"""
if 'common' in facts:
- facts['common']['use_dnsmasq'] = bool('use_dnsmasq' not in facts['common'] and
- safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
+ if 'use_dnsmasq' not in facts['common']:
+ facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
if 'master' in facts and 'dns_port' not in facts['master']:
if safe_get_bool(facts['common']['use_dnsmasq']):
facts['master']['dns_port'] = 8053
@@ -508,6 +559,7 @@ def set_dnsmasq_facts_if_unset(facts):
return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -535,6 +587,7 @@ def set_project_cfg_facts_if_unset(facts):
return facts
+
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -561,6 +614,7 @@ def set_identity_providers_if_unset(facts):
return facts
+
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
@@ -620,7 +674,6 @@ def set_url_facts_if_unset(facts):
host,
ports[prefix]))
-
r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
facts['master'].setdefault('loopback_cluster_name', r_lhn)
@@ -636,6 +689,7 @@ def set_url_facts_if_unset(facts):
return facts
+
def set_aggregate_facts(facts):
""" Set aggregate facts
@@ -713,9 +767,9 @@ def set_etcd_facts_if_unset(facts):
# Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
try:
# Add a fake section for parsing:
- ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
@@ -731,6 +785,7 @@ def set_etcd_facts_if_unset(facts):
return facts
+
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
@@ -811,6 +866,21 @@ def set_deployment_facts_if_unset(facts):
return facts
+
+def set_evacuate_or_drain_option(facts):
+ """OCP before 1.5/3.5 used '--evacuate'. As of 1.5/3.5 OCP uses
+'--drain'. Let's make that a fact for easy reference later.
+ """
+ if facts['common']['version_gte_3_5_or_1_5']:
+ # New-style
+ facts['common']['evacuate_or_drain'] = '--drain'
+ else:
+ # Old-style
+ facts['common']['evacuate_or_drain'] = '--evacuate'
+
+ return facts
+
+
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -822,31 +892,46 @@ def set_version_facts_if_unset(facts):
"""
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
- version = get_openshift_version(facts)
- if version:
- facts['common']['version'] = version
+ openshift_version = get_openshift_version(facts)
+ if openshift_version:
+ version = LooseVersion(openshift_version)
+ facts['common']['version'] = openshift_version
+ facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
if deployment_type == 'origin':
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
else:
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
- version_gte_3_3_or_1_3 = False
+ version_gte_3_3_or_1_3 = True
+ version_gte_3_4_or_1_4 = False
+ version_gte_3_5_or_1_5 = False
+ version_gte_3_6_or_1_6 = False
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
+ facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
+ facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
+ facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
-
- if version_gte_3_3_or_1_3:
+ if version_gte_3_4_or_1_4:
+ examples_content_version = 'v1.4'
+ elif version_gte_3_3_or_1_3:
examples_content_version = 'v1.3'
elif version_gte_3_2_or_1_2:
examples_content_version = 'v1.2'
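Parsing the version string into a LooseVersion once avoids re-wrapping it for every comparison, and its .version attribute supplies the components behind the new short_version fact. For example (distutils is deprecated on newer Pythons; illustrative only):

    from distutils.version import LooseVersion

    version = LooseVersion('3.4.1.2')
    assert version >= LooseVersion('3.4.0')
    assert '.'.join(str(x) for x in version.version[0:2]) == '3.4'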
@@ -859,6 +944,7 @@ def set_version_facts_if_unset(facts):
return facts
+
def set_manageiq_facts_if_unset(facts):
""" Set manageiq facts. This currently includes common.use_manageiq.
@@ -879,6 +965,7 @@ def set_manageiq_facts_if_unset(facts):
return facts
+
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -889,6 +976,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
+ # pylint: disable=too-many-branches
if 'common' in facts:
use_sdn = facts['common']['use_openshift_sdn']
if not (use_sdn == '' or isinstance(use_sdn, bool)):
@@ -899,10 +987,29 @@ def set_sdn_facts_if_unset(facts, system_facts):
facts['common']['sdn_network_plugin_name'] = plugin
if 'master' in facts:
+ # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
+ # these might be overridden if they exist in the master config file
+ sdn_cluster_network_cidr = '10.128.0.0/14'
+ sdn_host_subnet_length = '9'
+
+ master_cfg_path = os.path.join(facts['common']['config_base'],
+ 'master/master-config.yaml')
+ if os.path.isfile(master_cfg_path):
+ with open(master_cfg_path, 'r') as master_cfg_f:
+ config = yaml.safe_load(master_cfg_f.read())
+
+ if 'networkConfig' in config:
+ if 'clusterNetworkCIDR' in config['networkConfig']:
+ sdn_cluster_network_cidr = \
+ config['networkConfig']['clusterNetworkCIDR']
+ if 'hostSubnetLength' in config['networkConfig']:
+ sdn_host_subnet_length = \
+ config['networkConfig']['hostSubnetLength']
+
if 'sdn_cluster_network_cidr' not in facts['master']:
- facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
+ facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
if 'sdn_host_subnet_length' not in facts['master']:
- facts['master']['sdn_host_subnet_length'] = '8'
+ facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
if 'node' in facts and 'sdn_mtu' not in facts['node']:
node_ip = facts['common']['ip']
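The new SDN defaults (10.128.0.0/14 with hostSubnetLength 9, i.e. /23 node subnets) are only fallbacks; values found in an existing master-config.yaml win. Stripped of the facts plumbing, the read-with-defaults pattern looks roughly like this (the config path shown is an assumed config_base, for illustration):

    import os
    import yaml

    cidr, subnet_len = '10.128.0.0/14', '9'  # defaults when no config exists
    cfg = '/etc/origin/master/master-config.yaml'  # assumed path
    if os.path.isfile(cfg):
        with open(cfg) as f:
            net = (yaml.safe_load(f) or {}).get('networkConfig', {})
        cidr = net.get('clusterNetworkCIDR', cidr)
        subnet_len = net.get('hostSubnetLength', subnet_len)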
@@ -910,7 +1017,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
# default MTU if interface MTU cannot be detected
facts['node']['sdn_mtu'] = '1450'
- for val in system_facts.itervalues():
+ for val in itervalues(system_facts):
if isinstance(val, dict) and 'mtu' in val:
mtu = val['mtu']
@@ -919,7 +1026,9 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
+
def set_nodename(facts):
+ """ set nodename """
if 'node' in facts and 'common' in facts:
if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
@@ -927,6 +1036,7 @@ def set_nodename(facts):
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
+
def migrate_oauth_template_facts(facts):
"""
Migrate an old oauth template fact to a newer format if it's present.
@@ -946,6 +1056,7 @@ def migrate_oauth_template_facts(facts):
facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
return facts
+
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -968,6 +1079,7 @@ def format_url(use_ssl, hostname, port, path=''):
url = urlunparse((scheme, netloc, path, '', '', ''))
return url
+
def get_current_config(facts):
""" Get current openshift config
@@ -997,10 +1109,9 @@ def get_current_config(facts):
)
kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
- if (os.path.isfile('/usr/bin/openshift')
- and os.path.isfile(kubeconfig_path)):
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
try:
- _, output, _ = module.run_command(
+ _, output, _ = module.run_command( # noqa: F405
["/usr/bin/openshift", "ex", "config", "view", "-o",
"json", "--kubeconfig=%s" % kubeconfig_path],
check_rc=False
@@ -1031,13 +1142,25 @@ def get_current_config(facts):
return current_config
+
def build_kubelet_args(facts):
- """ Build node kubelet_args """
- cloud_cfg_path = os.path.join(facts['common']['config_base'],
- 'cloudprovider')
+ """Build node kubelet_args
+
+In the node-config.yaml file, kubeletArgument sub-keys have their
+values provided as a list. Hence the gratuitous use of ['foo'] below.
+ """
+ cloud_cfg_path = os.path.join(
+ facts['common']['config_base'],
+ 'cloudprovider')
+
+ # We only have to do this stuff on hosts that are nodes
if 'node' in facts:
+ # Any changes to the kubeletArguments parameter are stored
+ # here first.
kubelet_args = {}
+
if 'cloudprovider' in facts:
+ # EVERY cloud is special <3
if 'kind' in facts['cloudprovider']:
if facts['cloudprovider']['kind'] == 'aws':
kubelet_args['cloud-provider'] = ['aws']
@@ -1047,10 +1170,35 @@ def build_kubelet_args(facts):
kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if facts['cloudprovider']['kind'] == 'gce':
kubelet_args['cloud-provider'] = ['gce']
+ kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
+
+ # Automatically add node-labels to the kubeletArguments
+ # parameter. See BZ1359848 for additional details.
+ #
+ # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
+ if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
+ # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
+ # into ['foo=bar', 'a=b']
+ #
+ # On the openshift_node_labels inventory variable we loop
+ # over each key-value tuple (from .items()) and join the
+ # key to the value with an '=' character, this produces a
+ # list.
+ #
+ # map() seems to be returning an itertools.imap object
+ # instead of a list. We cast it to a list ourselves.
+ # pylint: disable=unnecessary-lambda
+ labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
+ if labels_str != '':
+ kubelet_args['node-labels'] = labels_str
+
+ # If we've added items to the kubelet_args dict then we need
+ # to merge the new items back into the main facts object.
if kubelet_args != {}:
facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
return facts
+
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
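The node-labels block converts a labels dict into the list-of-strings form kubeletArguments expects. A terser spelling of the map/lambda, with a list-aware guard (note the hunk's `labels_str != ''` compares a list to a string and is therefore always true; illustrative sketch):

    labels = {'region': 'infra', 'zone': 'default'}
    # {'region': 'infra', 'zone': 'default'} -> ['region=infra', 'zone=default']
    node_labels = ['='.join(kv) for kv in labels.items()]
    if node_labels:  # truthiness handles the empty-dict case
        print(node_labels)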
@@ -1067,10 +1215,12 @@ def build_controller_args(facts):
controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if facts['cloudprovider']['kind'] == 'gce':
controller_args['cloud-provider'] = ['gce']
+ controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
return facts
+
def build_api_server_args(facts):
""" Build master api_server_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1087,17 +1237,19 @@ def build_api_server_args(facts):
api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if facts['cloudprovider']['kind'] == 'gce':
api_server_args['cloud-provider'] = ['gce']
+ api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
return facts
+
def is_service_running(service):
""" Queries systemd through dbus to see if the service is running """
service_running = False
- bus = SystemBus()
- systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
- manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
try:
+ bus = SystemBus()
+ systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
+ manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
@@ -1106,10 +1258,20 @@ def is_service_running(service):
if service_load_state == 'loaded' and service_active_state == 'active':
service_running = True
except DBusException:
+ # TODO: do not swallow exception, as it may be hiding useful debugging
+ # information.
pass
return service_running
+
+def rpm_rebuilddb():
+ """
+ Runs rpm --rebuilddb to ensure the db is in good shape.
+ """
+ module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
+
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
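Creating the SystemBus inside the try block means a missing or unreachable system bus is handled like any other DBusException instead of aborting fact collection. Reduced to its essentials, the systemd query looks like this (requires the python-dbus bindings; illustrative only):

    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException

    def is_active(service):
        try:
            bus = SystemBus()
            systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
            manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
            unit = manager.GetUnit('{0}.service'.format(service))
            props = Interface(bus.get_object('org.freedesktop.systemd1', str(unit)),
                              dbus_interface='org.freedesktop.DBus.Properties')
            return props.Get('org.freedesktop.systemd1.Unit', 'ActiveState') == 'active'
        except DBusException:
            return False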
@@ -1120,9 +1282,10 @@ def get_version_output(binary, version_cmd):
cmd.append(item)
if os.path.isfile(cmd[0]):
- _, output, _ = module.run_command(cmd)
+ _, output, _ = module.run_command(cmd) # noqa: F405
return output
+
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
@@ -1135,6 +1298,26 @@ def get_docker_version_info():
}
return result
+
+def get_hosted_registry_insecure():
+ """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
+ registry is currently insecure.
+ """
+ hosted_registry_insecure = None
+ if os.path.exists('/etc/sysconfig/docker'):
+ try:
+ ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
+ ini_fp = io.StringIO(ini_str)
+ config = configparser.RawConfigParser()
+ config.readfp(ini_fp)
+ options = config.get('root', 'OPTIONS')
+ if 'insecure-registry' in options:
+ hosted_registry_insecure = True
+ except Exception: # pylint: disable=broad-except
+ pass
+ return hosted_registry_insecure
+
+
def get_openshift_version(facts):
""" Get current version of openshift on the host.
@@ -1151,12 +1334,13 @@ def get_openshift_version(facts):
# No need to run this method repeatedly on a system if we already know the
# version
+ # TODO: We need a way to force reload this after upgrading bits.
if 'common' in facts:
if 'version' in facts['common'] and facts['common']['version'] is not None:
- return facts['common']['version']
+ return chomp_commit_offset(facts['common']['version'])
if os.path.isfile('/usr/bin/openshift'):
- _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
elif 'common' in facts and 'is_containerized' in facts['common']:
version = get_container_openshift_version(facts)
@@ -1165,10 +1349,30 @@ def get_openshift_version(facts):
# This can be very slow and may get re-run multiple times, so we only use this
# if other methods failed to find a version.
if not version and os.path.isfile('/usr/local/bin/openshift'):
- _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
- return version
+ return chomp_commit_offset(version)
+
+
+def chomp_commit_offset(version):
+ """Chomp any "+git.foo" commit offset string from the given `version`
+ and return the modified version string.
+
+Ex:
+- chomp_commit_offset(None) => None
+- chomp_commit_offset(1337) => "1337"
+- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
+- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
+- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
+ """
+ if version is None:
+ return version
+ else:
+ # Stringify, just in case it's a Number type. Split by '+' and
+ # return the first split. No concerns about strings without a
+ # '+', .split() returns an array of the original string.
+ return str(version).split('+')[0]
def get_container_openshift_version(facts):
@@ -1235,9 +1439,10 @@ def apply_provider_facts(facts, provider_facts):
facts['provider'] = provider_facts
return facts
+
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
-# pylint: disable=too-many-branches
+# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
""" Recursively merge facts dicts
@@ -1260,16 +1465,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
# here, just completely overwrite with the new if they are present there.
inventory_json_facts = ['admission_plugin_config',
'kube_admission_plugin_config',
- 'image_policy_config']
+ 'image_policy_config',
+ "builddefaults",
+ "buildoverrides"]
facts = dict()
- for key, value in orig.iteritems():
+ for key, value in iteritems(orig):
# Key exists in both old and new facts.
if key in new:
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], basestring):
+ if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1306,16 +1513,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
# The master count (int) can only increase unless it
# has been passed as a protected fact to overwrite.
- if key == 'master_count':
+ if key == 'master_count' and new[key] is not None and new[key] is not '':
if int(value) <= int(new[key]):
facts[key] = copy.deepcopy(new[key])
else:
- module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') # noqa: F405
# ha (bool) can not change unless it has been passed
# as a protected fact to overwrite.
if key == 'ha':
if safe_get_bool(value) != safe_get_bool(new[key]):
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
else:
facts[key] = value
# No other condition has been met. Overwrite the old fact
@@ -1329,12 +1538,13 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
for key in new_keys:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if key in inventory_json_facts and isinstance(new[key], basestring):
+ if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
return facts
+
def save_local_facts(filename, facts):
""" Save local facts
@@ -1344,10 +1554,13 @@ def save_local_facts(filename, facts):
"""
try:
fact_dir = os.path.dirname(filename)
- if not os.path.exists(fact_dir):
- os.makedirs(fact_dir)
+ try:
+ os.makedirs(fact_dir) # try to make the directory
+ except OSError as exception:
+ if exception.errno != errno.EEXIST: # but it is okay if it is already there
+ raise # pass any other exceptions up the chain
with open(filename, 'w') as fact_file:
- fact_file.write(module.jsonify(facts))
+ fact_file.write(module.jsonify(facts)) # noqa: F405
os.chmod(filename, 0o600)
except (IOError, OSError) as ex:
raise OpenShiftFactsFileWriteError(
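The makedirs change closes a race: testing os.path.exists() first can still blow up if another process creates the directory in between, so the code now just attempts the mkdir and ignores EEXIST. On Python 3.2+ the same thing is a one-liner (illustrative):

    import os

    # Equivalent to the try/except errno.EEXIST dance above.
    os.makedirs('/etc/ansible/facts.d', exist_ok=True)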
@@ -1366,15 +1579,15 @@ def get_local_facts_from_file(filename):
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
- ini_facts = ConfigParser.SafeConfigParser()
+ ini_facts = configparser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
- except (ConfigParser.MissingSectionHeaderError,
- ConfigParser.ParsingError):
+ except (configparser.MissingSectionHeaderError,
+ configparser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
@@ -1383,6 +1596,7 @@ def get_local_facts_from_file(filename):
return local_facts
+
def sort_unique(alist):
""" Sorts and de-dupes a list
@@ -1400,6 +1614,7 @@ def sort_unique(alist):
return out
+
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1410,12 +1625,9 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
+
def set_proxy_facts(facts):
- """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
- no_proxy to the more specific builddefaults and builddefaults_git vars.
- 1. http_proxy, https_proxy, no_proxy
- 2. builddefaults_*
- 3. builddefaults_git_*
+ """ Set global proxy facts
Args:
facts(dict): existing facts
@@ -1425,20 +1637,33 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and \
- isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
- if 'generate_no_proxy_hosts' in common and \
- safe_get_bool(common['generate_no_proxy_hosts']):
+ if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
- common['no_proxy'].append('.' + common['dns_domain'])
- # We always add ourselves no matter what
+ # We always add local dns domain and ourselves no matter what
+ common['no_proxy'].append('.' + common['dns_domain'])
common['no_proxy'].append(common['hostname'])
common['no_proxy'] = sort_unique(common['no_proxy'])
facts['common'] = common
+ return facts
+
+
+def set_builddefaults_facts(facts):
+ """ Set build defaults including setting proxy values from http_proxy, https_proxy,
+ no_proxy to the more specific builddefaults and builddefaults_git vars.
+ 1. http_proxy, https_proxy, no_proxy
+ 2. builddefaults_*
+ 3. builddefaults_git_*
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
if 'builddefaults' in facts:
builddefaults = facts['builddefaults']
@@ -1448,27 +1673,46 @@ def set_proxy_facts(facts):
builddefaults['http_proxy'] = common['http_proxy']
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
- # make no_proxy into a list if it's not
- if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
- builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
+
+ # Create git specific facts from generic values, if git specific values are
+ # not defined.
if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
builddefaults['git_http_proxy'] = builddefaults['http_proxy']
if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
builddefaults['git_https_proxy'] = builddefaults['https_proxy']
- # If we're actually defining a proxy config then create kube_admission_plugin_config
- # if it doesn't exist, then merge builddefaults[config] structure
- # into kube_admission_plugin_config
- if 'kube_admission_plugin_config' not in facts['master']:
- facts['master']['kube_admission_plugin_config'] = dict()
- if 'config' in builddefaults and ('http_proxy' in builddefaults or \
- 'https_proxy' in builddefaults):
- facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
- facts['builddefaults'] = builddefaults
+ if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
+ builddefaults['git_no_proxy'] = builddefaults['no_proxy']
+ # If we're actually defining a builddefaults config then create admission_plugin_config
+ # then merge builddefaults[config] structure into admission_plugin_config
+ if 'config' in builddefaults:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ facts['master']['admission_plugin_config'].update(builddefaults['config'])
+ return facts
+
+
+def set_buildoverrides_facts(facts):
+ """ Set build overrides
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
+ if 'buildoverrides' in facts:
+ buildoverrides = facts['buildoverrides']
+ # If we're actually defining a buildoverrides config then create admission_plugin_config
+ # then merge buildoverrides[config] structure into admission_plugin_config
+ if 'config' in buildoverrides:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ facts['master']['admission_plugin_config'].update(buildoverrides['config'])
return facts
+
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1540,6 +1784,7 @@ def set_container_facts_if_unset(facts):
return facts
+
def set_installed_variant_rpm_facts(facts):
""" Set RPM facts of installed variant
Args:
@@ -1554,7 +1799,7 @@ def set_installed_variant_rpm_facts(facts):
['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
['tuned-profiles-%s-node' % base_rpm]
for rpm in variant_rpms:
- exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
+ exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405
if exit_code == 0:
installed_rpms.append(rpm)
@@ -1562,7 +1807,6 @@ def set_installed_variant_rpm_facts(facts):
return facts
-
class OpenShiftFactsInternalError(Exception):
"""Origin Facts Error"""
pass
@@ -1603,13 +1847,13 @@ class OpenShiftFacts(object):
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['builddefaults',
+ 'buildoverrides',
'clock',
'cloudprovider',
'common',
'docker',
'etcd',
'hosted',
- 'loadbalancer',
'master',
'node']
@@ -1631,12 +1875,12 @@ class OpenShiftFacts(object):
try:
# ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
- self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
+ self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
for (k, v) in self.system_facts.items():
self.system_facts["ansible_%s" % k.replace('-', '_')] = v
except UnboundLocalError:
# ansible-2.2
- self.system_facts = get_all_facts(module)['ansible_facts']
+ self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite,
@@ -1669,7 +1913,6 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
roles = local_facts.keys()
-
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
deployment_type = local_facts['common']['deployment_type']
else:
@@ -1696,18 +1939,21 @@ class OpenShiftFacts(object):
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_container_facts_if_unset(facts)
facts = build_kubelet_args(facts)
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_evacuate_or_drain_option(facts)
facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
+ facts = set_builddefaults_facts(facts)
+ facts = set_buildoverrides_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
facts = set_nodename(facts)
@@ -1724,7 +1970,7 @@ class OpenShiftFacts(object):
"""
defaults = {}
ip_addr = self.system_facts['ansible_default_ipv4']['address']
- exit_code, output, _ = module.run_command(['hostname', '-f'])
+ exit_code, output, _ = module.run_command(['hostname', '-f']) # noqa: F405
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
@@ -1743,22 +1989,6 @@ class OpenShiftFacts(object):
debug_level=2)
if 'master' in roles:
- scheduler_predicates = [
- {"name": "MatchNodeSelector"},
- {"name": "PodFitsResources"},
- {"name": "PodFitsPorts"},
- {"name": "NoDiskConflict"},
- {"name": "NoVolumeZoneConflict"},
- {"name": "MaxEBSVolumeCount"},
- {"name": "MaxGCEPDVolumeCount"},
- {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
- ]
- scheduler_priorities = [
- {"name": "LeastRequestedPriority", "weight": 1},
- {"name": "SelectorSpreadPriority", "weight": 1},
- {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
- ]
-
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
controllers_port='8444',
console_use_ssl=True,
@@ -1775,8 +2005,6 @@ class OpenShiftFacts(object):
access_token_max_seconds=86400,
auth_token_max_seconds=500,
oauth_grant_method='auto',
- scheduler_predicates=scheduler_predicates,
- scheduler_priorities=scheduler_priorities,
dynamic_provisioning_enabled=True,
max_requests_inflight=500)
@@ -1788,17 +2016,24 @@ class OpenShiftFacts(object):
if 'docker' in roles:
docker = dict(disable_push_dockerhub=False,
- hosted_registry_insecure=True,
options='--log-driver=json-file --log-opt max-size=50m')
+ # NOTE: This is a workaround for a dnf output racecondition that can occur in
+ # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
+ if self.system_facts['ansible_pkg_mgr'] == 'dnf':
+ rpm_rebuilddb()
+
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
docker['version'] = version_info['version']
docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
+ hosted_registry_insecure = get_hosted_registry_insecure()
+ if hosted_registry_insecure is not None:
+ docker['hosted_registry_insecure'] = hosted_registry_insecure
defaults['docker'] = docker
if 'clock' in roles:
- exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+ exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony']) # noqa: F405
chrony_installed = bool(exit_code == 0)
defaults['clock'] = dict(
enabled=True,
@@ -1821,12 +2056,31 @@ class OpenShiftFacts(object):
),
nfs=dict(
directory='/exports',
- options='*(rw,root_squash)'),
- openstack=dict(
- filesystem='ext4',
- volumeID='123'),
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ ),
+ logging=dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
host=None,
- access_modes=['ReadWriteOnce'],
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
create_pv=True,
create_pvc=False
)
@@ -1842,7 +2096,9 @@ class OpenShiftFacts(object):
directory='/exports',
options='*(rw,root_squash)'),
host=None,
- access_modes=['ReadWriteMany'],
+ access=dict(
+ modes=['ReadWriteMany']
+ ),
create_pv=True,
create_pvc=True
)
@@ -1850,13 +2106,6 @@ class OpenShiftFacts(object):
router=dict()
)
- if 'loadbalancer' in roles:
- loadbalancer = dict(frontend_port='8443',
- default_maxconn='20000',
- global_maxconn='20000',
- limit_nofile='100000')
- defaults['loadbalancer'] = loadbalancer
-
return defaults
def guess_host_provider(self):
@@ -1875,7 +2124,7 @@ class OpenShiftFacts(object):
# TODO: this is not exposed through module_utils/facts.py in ansible,
# need to create PR for ansible to expose it
- bios_vendor = get_file_content(
+ bios_vendor = get_file_content( # noqa: F405
'/sys/devices/virtual/dmi/id/bios_vendor'
)
if bios_vendor == 'Google':
@@ -1890,8 +2139,7 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
- elif (virt_type == 'xen' and virt_role == 'guest'
- and re.match(r'.*\.amazon$', product_version)):
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
@@ -1952,7 +2200,7 @@ class OpenShiftFacts(object):
# Determine if any of the provided variable structures match the fact.
matching_structure = None
- if openshift_env_structures != None:
+ if openshift_env_structures is not None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
@@ -1976,7 +2224,7 @@ class OpenShiftFacts(object):
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
- #pylint: disable=too-many-branches, too-many-locals
+ # pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
additive_facts_to_overwrite=None,
openshift_env=None,
@@ -2004,8 +2252,8 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env != None:
- for fact, value in openshift_env.iteritems():
+ if openshift_env != {} and openshift_env is not None:
+ for fact, value in iteritems(openshift_env):
oo_env_facts = dict()
current_level = oo_env_facts
keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
@@ -2033,16 +2281,16 @@ class OpenShiftFacts(object):
if 'docker' in new_local_facts:
# remove duplicate and empty strings from registry lists
- for cat in ['additional', 'blocked', 'insecure']:
+ for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], basestring):
+ isinstance(new_local_facts['docker']['log_options'], string_types):
new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -2050,7 +2298,7 @@ class OpenShiftFacts(object):
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
changed = True
- if not module.check_mode:
+ if not module.check_mode: # noqa: F405
save_local_facts(self.filename, new_local_facts)
self.changed = changed
@@ -2063,7 +2311,7 @@ class OpenShiftFacts(object):
facts (dict): facts to clean
"""
facts_to_remove = []
- for fact, value in facts.iteritems():
+ for fact, value in iteritems(facts):
if isinstance(facts[fact], dict):
facts[fact] = self.remove_empty_facts(facts[fact])
else:
@@ -2083,10 +2331,10 @@ class OpenShiftFacts(object):
invalid_facts = self.validate_master_facts(facts, invalid_facts)
if invalid_facts:
msg = 'Invalid facts detected:\n'
+ # pylint: disable=consider-iterating-dictionary
for key in invalid_facts.keys():
msg += '{0}: {1}\n'.format(key, invalid_facts[key])
- module.fail_json(msg=msg,
- changed=self.changed)
+ module.fail_json(msg=msg, changed=self.changed) # noqa: F405
# disabling pylint errors for line-too-long since we're dealing
# with best effort reduction of error messages here.
@@ -2138,13 +2386,14 @@ class OpenShiftFacts(object):
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
+
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
@@ -2158,16 +2407,19 @@ def main():
add_file_common_args=True,
)
- module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
- module.params['gather_timeout'] = 10
- module.params['filter'] = '*'
+ if not HAVE_DBUS:
+ module.fail_json(msg="This module requires dbus python bindings") # noqa: F405
+
+ module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
+ module.params['gather_timeout'] = 10 # noqa: F405
+ module.params['filter'] = '*' # noqa: F405
- role = module.params['role']
- local_facts = module.params['local_facts']
- additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
- openshift_env = module.params['openshift_env']
- openshift_env_structures = module.params['openshift_env_structures']
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
+ role = module.params['role'] # noqa: F405
+ local_facts = module.params['local_facts'] # noqa: F405
+ additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
+ openshift_env = module.params['openshift_env'] # noqa: F405
+ openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
+ protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
@@ -2179,21 +2431,15 @@ def main():
openshift_env_structures,
protected_facts_to_overwrite)
- file_params = module.params.copy()
+ file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
- file_args = module.load_file_common_arguments(file_params)
- changed = module.set_fs_attributes_if_different(file_args,
+ file_args = module.load_file_common_arguments(file_params) # noqa: F405
+ changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
openshift_facts.changed)
- return module.exit_json(changed=changed,
+ return module.exit_json(changed=changed, # noqa: F405
ansible_facts=openshift_facts.facts)
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.facts import *
-from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index afeb78f95..b7b521f1a 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -10,18 +10,16 @@
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-- name: Ensure PyYaml is installed
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not l_is_atomic | bool
-
-- name: Ensure yum-utils is installed
- action: "{{ ansible_pkg_mgr }} name=yum-utils state=present"
+- name: Ensure various deps are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
local_facts:
+ debug_level: "{{ openshift_debug_level | default(2) }}"
# TODO: Deprecate deployment_type in favor of openshift_deployment_type
deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
@@ -37,6 +35,8 @@
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- name: Set repoquery command
set_fact:
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
new file mode 100644
index 000000000..9c3110ff6
--- /dev/null
+++ b/roles/openshift_facts/vars/main.yml
@@ -0,0 +1,7 @@
+---
+required_packages:
+ - iproute
+ - python-dbus
+ - python-six
+ - PyYAML
+ - yum-utils