path: root/roles/openshift_facts/library/openshift_facts.py
Diffstat (limited to 'roles/openshift_facts/library/openshift_facts.py')
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 443
1 file changed, 291 insertions(+), 152 deletions(-)
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9754687cf..10121f82a 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -7,18 +7,39 @@
"""Ansible module for retrieving and setting openshift related facts"""
-import ConfigParser
+# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
+import errno
+import json
+import re
import io
import os
import yaml
-from distutils.util import strtobool
-from distutils.version import LooseVersion
import struct
import socket
-from dbus import SystemBus, Interface
-from dbus.exceptions import DBusException
+from distutils.util import strtobool
+from distutils.version import LooseVersion
+from six import string_types, text_type
+from six.moves import configparser
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import * # noqa: F403
+from ansible.module_utils.facts import * # noqa: F403
+from ansible.module_utils.urls import * # noqa: F403
+from ansible.module_utils.six import iteritems, itervalues
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+from ansible.module_utils._text import to_native
+
+HAVE_DBUS = False
+
+try:
+ from dbus import SystemBus, Interface
+ from dbus.exceptions import DBusException
+ HAVE_DBUS = True
+except ImportError:
+ pass
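
The try/except above lets the module import on hosts without python-dbus and defer the failure to main(), while six papers over the py2/py3 renames. A minimal sketch of the two six helpers this diff leans on (illustrative only, not part of the patch):

    from six import string_types
    from six.moves import configparser  # ConfigParser on py2, configparser on py3

    # string_types replaces the py2-only basestring in isinstance checks:
    isinstance('host.example.com', string_types)   # True on py2 and py3
    isinstance(u'host.example.com', string_types)  # True on both as well
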
DOCUMENTATION = '''
---
@@ -47,6 +68,7 @@ def migrate_docker_facts(facts):
}
if 'docker' not in facts:
facts['docker'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -60,11 +82,12 @@ def migrate_docker_facts(facts):
# log_options was originally meant to be a comma separated string, but
# we now prefer an actual list, with backward compatibility:
if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], basestring):
+ isinstance(facts['docker']['log_options'], string_types):
facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
return facts
+
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
@@ -75,6 +98,7 @@ def migrate_common_facts(facts):
}
if 'common' not in facts:
facts['common'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -82,6 +106,7 @@ def migrate_common_facts(facts):
facts['common'][param] = facts[role].pop(param)
return facts
+
def migrate_node_facts(facts):
""" Migrate facts from various roles into node """
params = {
@@ -89,6 +114,7 @@ def migrate_node_facts(facts):
}
if 'node' not in facts:
facts['node'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -96,14 +122,6 @@ def migrate_node_facts(facts):
facts['node'][param] = facts[role].pop(param)
return facts
-def migrate_local_facts(facts):
- """ Apply migrations of local facts """
- migrated_facts = copy.deepcopy(facts)
- migrated_facts = migrate_docker_facts(migrated_facts)
- migrated_facts = migrate_common_facts(migrated_facts)
- migrated_facts = migrate_node_facts(migrated_facts)
- migrated_facts = migrate_hosted_facts(migrated_facts)
- return migrated_facts
def migrate_hosted_facts(facts):
""" Apply migrations for master facts """
@@ -122,6 +140,34 @@ def migrate_hosted_facts(facts):
facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
return facts
+
+def migrate_admission_plugin_facts(facts):
+ """ Apply migrations for admission plugin facts """
+ if 'master' in facts:
+ if 'kube_admission_plugin_config' in facts['master']:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ # Merge existing kube_admission_plugin_config with admission_plugin_config.
+ facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
+ facts['master']['kube_admission_plugin_config'],
+ additive_facts_to_overwrite=[],
+ protected_facts_to_overwrite=[])
+ # Remove kube_admission_plugin_config fact
+ facts['master'].pop('kube_admission_plugin_config', None)
+ return facts
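
For illustration, this migration folds a legacy kube_admission_plugin_config into admission_plugin_config and drops the old key; a hypothetical facts dict (assuming the merge_facts semantics shown further down):

    facts = {'master': {'admission_plugin_config': {'PluginA': {'enable': True}},
                        'kube_admission_plugin_config': {'PluginB': {'enable': True}}}}
    migrated = migrate_admission_plugin_facts(facts)
    # migrated['master'] == {'admission_plugin_config':
    #     {'PluginA': {'enable': True}, 'PluginB': {'enable': True}}}
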
+
+
+def migrate_local_facts(facts):
+ """ Apply migrations of local facts """
+ migrated_facts = copy.deepcopy(facts)
+ migrated_facts = migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_common_facts(migrated_facts)
+ migrated_facts = migrate_node_facts(migrated_facts)
+ migrated_facts = migrate_hosted_facts(migrated_facts)
+ migrated_facts = migrate_admission_plugin_facts(migrated_facts)
+ return migrated_facts
+
+
def first_ip(network):
""" Return the first IPv4 address in network
@@ -130,13 +176,14 @@ def first_ip(network):
Returns:
str: first IPv4 address
"""
- atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
- itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
+ itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
(address, netmask) = network.split('/')
netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
return itoa((atoi(address) & netmask_i) + 1)
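
first_ip masks the network address and adds one; note that the prefix length is also fed through inet_aton, which happens to parse a bare '24' as the integer 24. A standalone sketch of the same arithmetic (hypothetical name _first_ip):

    import socket
    import struct

    def _first_ip(network):
        addr, prefix = network.split('/')   # e.g. '10.1.2.0/24'
        addr_i = struct.unpack("!I", socket.inet_aton(addr))[0]
        mask_i = (0xffffffff << (32 - int(prefix))) & 0xffffffff
        return socket.inet_ntoa(struct.pack("!I", (addr_i & mask_i) + 1))

    assert _first_ip('10.1.2.0/24') == '10.1.2.1'
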
+
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -174,11 +221,8 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames
- if (i is not None and isinstance(i, basestring)
- and re.match(ip_regex, i))]
- hosts = [i for i in hostnames
- if i is not None and i != '' and i not in ips]
+ ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
+ hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
for host in host_list:
@@ -198,13 +242,13 @@ def query_metadata(metadata_url, headers=None, expect_json=False):
Returns:
dict or list: metadata request result
"""
- result, info = fetch_url(module, metadata_url, headers=headers)
+ result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
if info['status'] != 200:
raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
- return module.from_json(result.read())
+ return module.from_json(to_native(result.read())) # noqa: F405
else:
- return [line.strip() for line in result.readlines()]
+ return [to_native(line.strip()) for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
@@ -312,9 +356,9 @@ def normalize_aws_facts(metadata, facts):
):
int_info = dict()
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
- for ips_var, int_var in var_map.iteritems():
+ for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
- if isinstance(ips, basestring):
+ if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
@@ -363,7 +407,7 @@ def normalize_openstack_facts(metadata, facts):
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
- for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
+ for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
('public_hostname', 'public-hostname', 'public-ipv4')]:
try:
if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
@@ -404,6 +448,7 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
+
def set_flannel_facts_if_unset(facts):
""" Set flannel facts if not already present in facts dict
dict: the facts dict updated with the flannel facts if
@@ -421,6 +466,7 @@ def set_flannel_facts_if_unset(facts):
facts['common']['use_flannel'] = use_flannel
return facts
+
def set_nuage_facts_if_unset(facts):
""" Set nuage facts if not already present in facts dict
dict: the facts dict updated with the nuage facts if
@@ -438,6 +484,7 @@ def set_nuage_facts_if_unset(facts):
facts['common']['use_nuage'] = use_nuage
return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -455,6 +502,7 @@ def set_node_schedulability(facts):
facts['node']['schedulable'] = True
return facts
+
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -491,6 +539,7 @@ def set_selectors(facts):
return facts
+
def set_dnsmasq_facts_if_unset(facts):
""" Set dnsmasq facts if not already present in facts
Args:
@@ -510,6 +559,7 @@ def set_dnsmasq_facts_if_unset(facts):
return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -537,6 +587,7 @@ def set_project_cfg_facts_if_unset(facts):
return facts
+
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -563,6 +614,7 @@ def set_identity_providers_if_unset(facts):
return facts
+
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
@@ -622,7 +674,6 @@ def set_url_facts_if_unset(facts):
host,
ports[prefix]))
-
r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
facts['master'].setdefault('loopback_cluster_name', r_lhn)
@@ -638,6 +689,7 @@ def set_url_facts_if_unset(facts):
return facts
+
def set_aggregate_facts(facts):
""" Set aggregate facts
@@ -715,9 +767,9 @@ def set_etcd_facts_if_unset(facts):
# Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
try:
# Add a fake section for parsing:
- ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
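
The fake [root] section is needed because configparser cannot read section-less, env-style files such as /etc/etcd/etcd.conf; a self-contained illustration:

    import io
    from six.moves import configparser

    raw = u'ETCD_DATA_DIR="/var/lib/etcd"\n'       # env-style, no [section] header
    config = configparser.RawConfigParser()
    config.readfp(io.StringIO(u'[root]\n' + raw))  # prepend a fake section
    config.get('root', 'ETCD_DATA_DIR')            # '"/var/lib/etcd"'; the caller
                                                   # strips the surrounding quotes
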
@@ -733,6 +785,7 @@ def set_etcd_facts_if_unset(facts):
return facts
+
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
@@ -813,6 +866,21 @@ def set_deployment_facts_if_unset(facts):
return facts
+
+def set_evacuate_or_drain_option(facts):
+ """OCP before 1.5/3.5 used '--evacuate'. As of 1.5/3.5 OCP uses
+'--drain'. Let's make that a fact for easy reference later.
+ """
+ if facts['common']['version_gte_3_5_or_1_5']:
+ # New-style
+ facts['common']['evacuate_or_drain'] = '--drain'
+ else:
+ # Old-style
+ facts['common']['evacuate_or_drain'] = '--evacuate'
+
+ return facts
+
+
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -824,31 +892,46 @@ def set_version_facts_if_unset(facts):
"""
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
- version = get_openshift_version(facts)
- if version:
- facts['common']['version'] = version
+ openshift_version = get_openshift_version(facts)
+ if openshift_version:
+ version = LooseVersion(openshift_version)
+ facts['common']['version'] = openshift_version
+ facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
if deployment_type == 'origin':
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
else:
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
- version_gte_3_3_or_1_3 = False
+ version_gte_3_3_or_1_3 = True
+ version_gte_3_4_or_1_4 = False
+ version_gte_3_5_or_1_5 = False
+ version_gte_3_6_or_1_6 = False
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
+ facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
+ facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
+ facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
-
- if version_gte_3_3_or_1_3:
+ if version_gte_3_4_or_1_4:
+ examples_content_version = 'v1.4'
+ elif version_gte_3_3_or_1_3:
examples_content_version = 'v1.3'
elif version_gte_3_2_or_1_2:
examples_content_version = 'v1.2'
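
Parsing LooseVersion once avoids re-parsing the string for every comparison, and short_version comes straight from the parsed components; for example:

    from distutils.version import LooseVersion

    version = LooseVersion('3.4.0.40')
    version >= LooseVersion('3.4.0')                  # True
    '.'.join(str(x) for x in version.version[0:2])    # '3.4'
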
@@ -861,6 +944,7 @@ def set_version_facts_if_unset(facts):
return facts
+
def set_manageiq_facts_if_unset(facts):
""" Set manageiq facts. This currently includes common.use_manageiq.
@@ -881,6 +965,7 @@ def set_manageiq_facts_if_unset(facts):
return facts
+
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -891,6 +976,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
+ # pylint: disable=too-many-branches
if 'common' in facts:
use_sdn = facts['common']['use_openshift_sdn']
if not (use_sdn == '' or isinstance(use_sdn, bool)):
@@ -931,7 +1017,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
# default MTU if interface MTU cannot be detected
facts['node']['sdn_mtu'] = '1450'
- for val in system_facts.itervalues():
+ for val in itervalues(system_facts):
if isinstance(val, dict) and 'mtu' in val:
mtu = val['mtu']
@@ -940,7 +1026,9 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
+
def set_nodename(facts):
+ """ set nodename """
if 'node' in facts and 'common' in facts:
if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
@@ -948,6 +1036,7 @@ def set_nodename(facts):
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
+
def migrate_oauth_template_facts(facts):
"""
Migrate an old oauth template fact to a newer format if it's present.
@@ -967,6 +1056,7 @@ def migrate_oauth_template_facts(facts):
facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
return facts
+
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -989,6 +1079,7 @@ def format_url(use_ssl, hostname, port, path=''):
url = urlunparse((scheme, netloc, path, '', '', ''))
return url
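
format_url hands urlunparse the standard six-part tuple (scheme, netloc, path, params, query, fragment); for example:

    from ansible.module_utils.six.moves.urllib.parse import urlunparse

    urlunparse(('https', 'master.example.com:8443', '/console', '', '', ''))
    # -> 'https://master.example.com:8443/console'
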
+
def get_current_config(facts):
""" Get current openshift config
@@ -1018,10 +1109,9 @@ def get_current_config(facts):
)
kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
- if (os.path.isfile('/usr/bin/openshift')
- and os.path.isfile(kubeconfig_path)):
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
try:
- _, output, _ = module.run_command(
+ _, output, _ = module.run_command( # noqa: F405
["/usr/bin/openshift", "ex", "config", "view", "-o",
"json", "--kubeconfig=%s" % kubeconfig_path],
check_rc=False
@@ -1052,6 +1142,7 @@ def get_current_config(facts):
return current_config
+
def build_kubelet_args(facts):
"""Build node kubelet_args
@@ -1096,6 +1187,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
#
# map() seems to be returning an itertools.imap object
# instead of a list. We cast it to a list ourselves.
+ # pylint: disable=unnecessary-lambda
labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
if labels_str != '':
kubelet_args['node-labels'] = labels_str
@@ -1106,6 +1198,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
return facts
+
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1127,6 +1220,7 @@ def build_controller_args(facts):
facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
return facts
+
def build_api_server_args(facts):
""" Build master api_server_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1148,13 +1242,14 @@ def build_api_server_args(facts):
facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
return facts
+
def is_service_running(service):
""" Queries systemd through dbus to see if the service is running """
service_running = False
- bus = SystemBus()
- systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
- manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
try:
+ bus = SystemBus()
+ systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
+ manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
@@ -1163,10 +1258,20 @@ def is_service_running(service):
if service_load_state == 'loaded' and service_active_state == 'active':
service_running = True
except DBusException:
+ # TODO: do not swallow exception, as it may be hiding useful debugging
+ # information.
pass
return service_running
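
Moving the SystemBus() setup inside the try block means a missing or unreachable system bus is caught by the same DBusException handler. For readers without the dbus bindings, a rough shell-based equivalent of the query (illustrative only; the module itself stays on python-dbus):

    import subprocess

    def _is_active(service):
        # 'systemctl is-active --quiet' exits 0 only for active units.
        return subprocess.call(['systemctl', 'is-active', '--quiet', service]) == 0
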
+
+def rpm_rebuilddb():
+ """
+ Runs rpm --rebuilddb to ensure the db is in good shape.
+ """
+ module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
+
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
@@ -1177,9 +1282,10 @@ def get_version_output(binary, version_cmd):
cmd.append(item)
if os.path.isfile(cmd[0]):
- _, output, _ = module.run_command(cmd)
+ _, output, _ = module.run_command(cmd) # noqa: F405
return output
+
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
@@ -1192,6 +1298,7 @@ def get_docker_version_info():
}
return result
+
def get_hosted_registry_insecure():
""" Parses OPTIONS from /etc/sysconfig/docker to determine if the
registry is currently insecure.
@@ -1199,17 +1306,18 @@ def get_hosted_registry_insecure():
hosted_registry_insecure = None
if os.path.exists('/etc/sysconfig/docker'):
try:
- ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
hosted_registry_insecure = True
- except:
+ except Exception: # pylint: disable=broad-except
pass
return hosted_registry_insecure
+
def get_openshift_version(facts):
""" Get current version of openshift on the host.
@@ -1226,12 +1334,13 @@ def get_openshift_version(facts):
# No need to run this method repeatedly on a system if we already know the
# version
+ # TODO: We need a way to force reload this after upgrading bits.
if 'common' in facts:
if 'version' in facts['common'] and facts['common']['version'] is not None:
return chomp_commit_offset(facts['common']['version'])
if os.path.isfile('/usr/bin/openshift'):
- _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
elif 'common' in facts and 'is_containerized' in facts['common']:
version = get_container_openshift_version(facts)
@@ -1240,7 +1349,7 @@ def get_openshift_version(facts):
# This can be very slow and may get re-run multiple times, so we only use this
# if other methods failed to find a version.
if not version and os.path.isfile('/usr/local/bin/openshift'):
- _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
return chomp_commit_offset(version)
@@ -1330,9 +1439,10 @@ def apply_provider_facts(facts, provider_facts):
facts['provider'] = provider_facts
return facts
+
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
-# pylint: disable=too-many-branches
+# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
""" Recursively merge facts dicts
@@ -1355,16 +1465,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
# here, just completely overwrite with the new if they are present there.
inventory_json_facts = ['admission_plugin_config',
'kube_admission_plugin_config',
- 'image_policy_config']
+ 'image_policy_config',
+ "builddefaults",
+ "buildoverrides"]
facts = dict()
- for key, value in orig.iteritems():
+ for key, value in iteritems(orig):
# Key exists in both old and new facts.
if key in new:
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], basestring):
+ if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1401,16 +1513,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
# The master count (int) can only increase unless it
# has been passed as a protected fact to overwrite.
- if key == 'master_count':
+ if key == 'master_count' and new[key] is not None and new[key] != '':
if int(value) <= int(new[key]):
facts[key] = copy.deepcopy(new[key])
else:
- module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') # noqa: F405
# ha (bool) can not change unless it has been passed
# as a protected fact to overwrite.
if key == 'ha':
if safe_get_bool(value) != safe_get_bool(new[key]):
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
else:
facts[key] = value
# No other condition has been met. Overwrite the old fact
@@ -1424,12 +1538,13 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
for key in new_keys:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if key in inventory_json_facts and isinstance(new[key], basestring):
+ if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
return facts
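
The string_types check above handles inventory facts that arrive as JSON text; because JSON is valid YAML, yaml.safe_load turns them back into dicts (key names here are illustrative):

    import yaml

    yaml.safe_load('{"BuildDefaults": {"kind": "BuildDefaultsConfig"}}')
    # -> {'BuildDefaults': {'kind': 'BuildDefaultsConfig'}}
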
+
def save_local_facts(filename, facts):
""" Save local facts
@@ -1445,7 +1560,7 @@ def save_local_facts(filename, facts):
if exception.errno != errno.EEXIST: # but it is okay if it is already there
raise # pass any other exceptions up the chain
with open(filename, 'w') as fact_file:
- fact_file.write(module.jsonify(facts))
+ fact_file.write(module.jsonify(facts)) # noqa: F405
os.chmod(filename, 0o600)
except (IOError, OSError) as ex:
raise OpenShiftFactsFileWriteError(
@@ -1464,15 +1579,15 @@ def get_local_facts_from_file(filename):
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
- ini_facts = ConfigParser.SafeConfigParser()
+ ini_facts = configparser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
- except (ConfigParser.MissingSectionHeaderError,
- ConfigParser.ParsingError):
+ except (configparser.MissingSectionHeaderError,
+ configparser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
@@ -1481,6 +1596,7 @@ def get_local_facts_from_file(filename):
return local_facts
+
def sort_unique(alist):
""" Sorts and de-dupes a list
@@ -1498,6 +1614,7 @@ def sort_unique(alist):
return out
+
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1508,12 +1625,9 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
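
strtobool accepts only strings, hence the str() wrapper, which lets one call handle bools, ints, and strings alike:

    from distutils.util import strtobool

    bool(strtobool(str(True)))   # True  ('True' parses as truthy)
    bool(strtobool('yes'))       # True
    bool(strtobool('0'))         # False
    strtobool('maybe')           # raises ValueError
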
+
def set_proxy_facts(facts):
- """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
- no_proxy to the more specific builddefaults and builddefaults_git vars.
- 1. http_proxy, https_proxy, no_proxy
- 2. builddefaults_*
- 3. builddefaults_git_*
+ """ Set global proxy facts
Args:
facts(dict): existing facts
@@ -1523,13 +1637,11 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and \
- isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
- if 'generate_no_proxy_hosts' in common and \
- safe_get_bool(common['generate_no_proxy_hosts']):
+ if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
@@ -1537,6 +1649,21 @@ def set_proxy_facts(facts):
common['no_proxy'].append(common['hostname'])
common['no_proxy'] = sort_unique(common['no_proxy'])
facts['common'] = common
+ return facts
+
+
+def set_builddefaults_facts(facts):
+ """ Set build defaults including setting proxy values from http_proxy, https_proxy,
+ no_proxy to the more specific builddefaults and builddefaults_git vars.
+ 1. http_proxy, https_proxy, no_proxy
+ 2. builddefaults_*
+ 3. builddefaults_git_*
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
if 'builddefaults' in facts:
builddefaults = facts['builddefaults']
@@ -1546,27 +1673,46 @@ def set_proxy_facts(facts):
builddefaults['http_proxy'] = common['http_proxy']
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
- # make no_proxy into a list if it's not
- if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
- builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
+
+ # Create git specific facts from generic values, if git specific values are
+ # not defined.
if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
builddefaults['git_http_proxy'] = builddefaults['http_proxy']
if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
builddefaults['git_https_proxy'] = builddefaults['https_proxy']
- # If we're actually defining a proxy config then create kube_admission_plugin_config
- # if it doesn't exist, then merge builddefaults[config] structure
- # into kube_admission_plugin_config
- if 'kube_admission_plugin_config' not in facts['master']:
- facts['master']['kube_admission_plugin_config'] = dict()
- if 'config' in builddefaults and ('http_proxy' in builddefaults or \
- 'https_proxy' in builddefaults):
- facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
- facts['builddefaults'] = builddefaults
+ if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
+ builddefaults['git_no_proxy'] = builddefaults['no_proxy']
+ # If we're actually defining a builddefaults config then create admission_plugin_config
+ # then merge builddefaults[config] structure into admission_plugin_config
+ if 'config' in builddefaults:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ facts['master']['admission_plugin_config'].update(builddefaults['config'])
+ return facts
+
+
+def set_buildoverrides_facts(facts):
+ """ Set build overrides
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
+ if 'buildoverrides' in facts:
+ buildoverrides = facts['buildoverrides']
+ # If we're actually defining a buildoverrides config then create admission_plugin_config
+ # then merge buildoverrides[config] structure into admission_plugin_config
+ if 'config' in buildoverrides:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ facts['master']['admission_plugin_config'].update(buildoverrides['config'])
return facts
+
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1638,6 +1784,7 @@ def set_container_facts_if_unset(facts):
return facts
+
def set_installed_variant_rpm_facts(facts):
""" Set RPM facts of installed variant
Args:
@@ -1652,7 +1799,7 @@ def set_installed_variant_rpm_facts(facts):
['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
['tuned-profiles-%s-node' % base_rpm]
for rpm in variant_rpms:
- exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
+ exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405
if exit_code == 0:
installed_rpms.append(rpm)
@@ -1660,7 +1807,6 @@ def set_installed_variant_rpm_facts(facts):
return facts
-
class OpenShiftFactsInternalError(Exception):
"""Origin Facts Error"""
pass
@@ -1701,6 +1847,7 @@ class OpenShiftFacts(object):
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['builddefaults',
+ 'buildoverrides',
'clock',
'cloudprovider',
'common',
@@ -1728,12 +1875,12 @@ class OpenShiftFacts(object):
try:
# ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
- self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
+ self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
for (k, v) in self.system_facts.items():
self.system_facts["ansible_%s" % k.replace('-', '_')] = v
except UnboundLocalError:
# ansible-2.2
- self.system_facts = get_all_facts(module)['ansible_facts']
+ self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite,
@@ -1766,7 +1913,6 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
roles = local_facts.keys()
-
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
deployment_type = local_facts['common']['deployment_type']
else:
@@ -1800,11 +1946,14 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_evacuate_or_drain_option(facts)
facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
+ facts = set_builddefaults_facts(facts)
+ facts = set_buildoverrides_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
facts = set_nodename(facts)
@@ -1821,7 +1970,7 @@ class OpenShiftFacts(object):
"""
defaults = {}
ip_addr = self.system_facts['ansible_default_ipv4']['address']
- exit_code, output, _ = module.run_command(['hostname', '-f'])
+ exit_code, output, _ = module.run_command(['hostname', '-f']) # noqa: F405
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
@@ -1840,22 +1989,6 @@ class OpenShiftFacts(object):
debug_level=2)
if 'master' in roles:
- scheduler_predicates = [
- {"name": "MatchNodeSelector"},
- {"name": "PodFitsResources"},
- {"name": "PodFitsPorts"},
- {"name": "NoDiskConflict"},
- {"name": "NoVolumeZoneConflict"},
- {"name": "MaxEBSVolumeCount"},
- {"name": "MaxGCEPDVolumeCount"},
- {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
- ]
- scheduler_priorities = [
- {"name": "LeastRequestedPriority", "weight": 1},
- {"name": "SelectorSpreadPriority", "weight": 1},
- {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
- ]
-
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
controllers_port='8444',
console_use_ssl=True,
@@ -1872,8 +2005,6 @@ class OpenShiftFacts(object):
access_token_max_seconds=86400,
auth_token_max_seconds=500,
oauth_grant_method='auto',
- scheduler_predicates=scheduler_predicates,
- scheduler_priorities=scheduler_priorities,
dynamic_provisioning_enabled=True,
max_requests_inflight=500)
@@ -1886,6 +2017,11 @@ class OpenShiftFacts(object):
if 'docker' in roles:
docker = dict(disable_push_dockerhub=False,
options='--log-driver=json-file --log-opt max-size=50m')
+ # NOTE: This is a workaround for a dnf output race condition that can occur in
+ # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
+ if self.system_facts['ansible_pkg_mgr'] == 'dnf':
+ rpm_rebuilddb()
+
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
@@ -1897,7 +2033,7 @@ class OpenShiftFacts(object):
defaults['docker'] = docker
if 'clock' in roles:
- exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+ exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony']) # noqa: F405
chrony_installed = bool(exit_code == 0)
defaults['clock'] = dict(
enabled=True,
@@ -1923,7 +2059,9 @@ class OpenShiftFacts(object):
options='*(rw,root_squash)'
),
host=None,
- access_modes=['ReadWriteOnce'],
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
create_pv=True,
create_pvc=False
)
@@ -1940,7 +2078,9 @@ class OpenShiftFacts(object):
options='*(rw,root_squash)'
),
host=None,
- access_modes=['ReadWriteOnce'],
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
create_pv=True,
create_pvc=False
)
@@ -1956,7 +2096,9 @@ class OpenShiftFacts(object):
directory='/exports',
options='*(rw,root_squash)'),
host=None,
- access_modes=['ReadWriteMany'],
+ access=dict(
+ modes=['ReadWriteMany']
+ ),
create_pv=True,
create_pvc=True
)
@@ -1982,7 +2124,7 @@ class OpenShiftFacts(object):
# TODO: this is not exposed through module_utils/facts.py in ansible,
# need to create PR for ansible to expose it
- bios_vendor = get_file_content(
+ bios_vendor = get_file_content( # noqa: F405
'/sys/devices/virtual/dmi/id/bios_vendor'
)
if bios_vendor == 'Google':
@@ -1997,8 +2139,7 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
- elif (virt_type == 'xen' and virt_role == 'guest'
- and re.match(r'.*\.amazon$', product_version)):
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
@@ -2059,7 +2200,7 @@ class OpenShiftFacts(object):
# Determine if any of the provided variable structures match the fact.
matching_structure = None
- if openshift_env_structures != None:
+ if openshift_env_structures is not None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
@@ -2083,7 +2224,7 @@ class OpenShiftFacts(object):
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
- #pylint: disable=too-many-branches, too-many-locals
+ # pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
additive_facts_to_overwrite=None,
openshift_env=None,
@@ -2111,8 +2252,8 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env != None:
- for fact, value in openshift_env.iteritems():
+ if openshift_env != {} and openshift_env is not None:
+ for fact, value in iteritems(openshift_env):
oo_env_facts = dict()
current_level = oo_env_facts
keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
@@ -2140,16 +2281,16 @@ class OpenShiftFacts(object):
if 'docker' in new_local_facts:
# remove duplicate and empty strings from registry lists
- for cat in ['additional', 'blocked', 'insecure']:
+ for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], basestring):
+ isinstance(new_local_facts['docker']['log_options'], string_types):
new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -2157,7 +2298,7 @@ class OpenShiftFacts(object):
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
changed = True
- if not module.check_mode:
+ if not module.check_mode: # noqa: F405
save_local_facts(self.filename, new_local_facts)
self.changed = changed
@@ -2170,7 +2311,7 @@ class OpenShiftFacts(object):
facts (dict): facts to clean
"""
facts_to_remove = []
- for fact, value in facts.iteritems():
+ for fact, value in iteritems(facts):
if isinstance(facts[fact], dict):
facts[fact] = self.remove_empty_facts(facts[fact])
else:
@@ -2190,10 +2331,10 @@ class OpenShiftFacts(object):
invalid_facts = self.validate_master_facts(facts, invalid_facts)
if invalid_facts:
msg = 'Invalid facts detected:\n'
+ # pylint: disable=consider-iterating-dictionary
for key in invalid_facts.keys():
msg += '{0}: {1}\n'.format(key, invalid_facts[key])
- module.fail_json(msg=msg,
- changed=self.changed)
+ module.fail_json(msg=msg, changed=self.changed) # noqa: F405
# disabling pylint errors for line-too-long since we're dealing
# with best effort reduction of error messages here.
@@ -2245,13 +2386,14 @@ class OpenShiftFacts(object):
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
+
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
@@ -2265,16 +2407,19 @@ def main():
add_file_common_args=True,
)
- module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
- module.params['gather_timeout'] = 10
- module.params['filter'] = '*'
+ if not HAVE_DBUS:
+ module.fail_json(msg="This module requires dbus python bindings") # noqa: F405
+
+ module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
+ module.params['gather_timeout'] = 10 # noqa: F405
+ module.params['filter'] = '*' # noqa: F405
- role = module.params['role']
- local_facts = module.params['local_facts']
- additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
- openshift_env = module.params['openshift_env']
- openshift_env_structures = module.params['openshift_env_structures']
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
+ role = module.params['role'] # noqa: F405
+ local_facts = module.params['local_facts'] # noqa: F405
+ additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
+ openshift_env = module.params['openshift_env'] # noqa: F405
+ openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
+ protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
@@ -2286,21 +2431,15 @@ def main():
openshift_env_structures,
protected_facts_to_overwrite)
- file_params = module.params.copy()
+ file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
- file_args = module.load_file_common_arguments(file_params)
- changed = module.set_fs_attributes_if_different(file_args,
+ file_args = module.load_file_common_arguments(file_params) # noqa: F405
+ changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
openshift_facts.changed)
- return module.exit_json(changed=changed,
+ return module.exit_json(changed=changed, # noqa: F405
ansible_facts=openshift_facts.facts)
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.facts import *
-from ansible.module_utils.urls import *
if __name__ == '__main__':
main()