Diffstat (limited to 'roles')
-rwxr-xr-x  roles/etcd_common/library/delegated_serial_command.py                                      |  18
-rw-r--r--  roles/kube_nfs_volumes/library/partitionpool.py                                             |  23
-rw-r--r--  roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py                        |  24
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py                        |   3
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py                             |   6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py                                            | 238
-rw-r--r--  roles/openshift_master/vars/main.yml                                                        |   7
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py   |  96
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py   |  87
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml                                                 |  37
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py       | 174
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py       | 164
-rw-r--r--  roles/openshift_master_facts/vars/main.yml                                                  |   5
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py                                    |  48
14 files changed, 748 insertions(+), 182 deletions(-)
diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd_common/library/delegated_serial_command.py
index 84d4f97c2..0cab1ca88 100755
--- a/roles/etcd_common/library/delegated_serial_command.py
+++ b/roles/etcd_common/library/delegated_serial_command.py
@@ -24,12 +24,9 @@
''' delegated_serial_command '''
-import copy
-import sys
import datetime
+import errno
import glob
-import traceback
-import re
import shlex
import os
import fcntl
@@ -133,6 +130,7 @@ OPTIONS = {'chdir': None,
'lockfile': None,
'timeout': None}
+
def check_command(commandline):
''' Check provided command '''
arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
@@ -160,7 +158,7 @@ def check_command(commandline):
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
def main():
''' Main module function '''
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
_uses_shell=dict(type='bool', default=False),
command=dict(required=True),
@@ -220,9 +218,9 @@ def main():
)
if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
path = os.path.expanduser(removes)
if not glob.glob(path):
module.exit_json(
@@ -268,7 +266,9 @@ def main():
iterated=iterated
)
+
# import module snippets
-from ansible.module_utils.basic import *
+# pylint: disable=wrong-import-position
+from ansible.module_utils.basic import * # noqa: F402,F403
main()
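
The module serializes otherwise-parallel command runs through an exclusive lock on the lockfile option seen in OPTIONS above. A minimal sketch of that pattern, assuming only the fcntl module already imported above (the helper name is illustrative, not from the module):

import fcntl

def run_serially(lockfile, func):
    # Block until we hold an exclusive lock, run, then release.
    with open(lockfile, 'a') as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)
        try:
            return func()
        finally:
            fcntl.flock(lock, fcntl.LOCK_UN)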
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
index 9bd3228c1..2cd454274 100644
--- a/roles/kube_nfs_volumes/library/partitionpool.py
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -52,7 +52,7 @@ options:
partitions. On 1 TiB disk, 10 partitions will be created.
- Example 2: size=100G:1,10G:1 says that ratio of space occupied by 100 GiB
- partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
+ partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
will be split into five 100 GiB partitions and 500 GiB will be split into fifty
10GiB partitions.
- size=100G:1,10G:1 = 5x 100 GiB and 50x 10 GiB partitions (on 1 TiB disk).
@@ -73,7 +73,7 @@ options:
and eight 50 GiB partitions (again, 400 GiB).
- size=200G:1,100G:1,50G:1 = 1x 200 GiB, 4x 100 GiB and 8x 50 GiB partitions
(on 1 TiB disk).
-
+
force:
description:
- If True, it will always overwrite the partition table on the disk and create a new one.
@@ -81,6 +81,7 @@ options:
"""
+
# It's not a class; it's more of a simple struct with almost no functionality.
# pylint: disable=too-few-public-methods
class PartitionSpec(object):
@@ -98,6 +99,7 @@ class PartitionSpec(object):
""" Set count of parititions of this specification. """
self.count = count
+
def assign_space(total_size, specs):
"""
Satisfy all the PartitionSpecs according to their weight.
@@ -113,6 +115,7 @@ def assign_space(total_size, specs):
total_size -= num_blocks * spec.size
total_weight -= spec.weight
+
def partition(diskname, specs, force=False, check_mode=False):
"""
Create requested partitions.
@@ -161,16 +164,17 @@ def partition(diskname, specs, force=False, check_mode=False):
pass
return count
+
def parse_spec(text):
""" Parse string with partition specification. """
tokens = text.split(",")
specs = []
for token in tokens:
- if not ":" in token:
+ if ":" not in token:
token += ":1"
(sizespec, weight) = token.split(':')
- weight = float(weight) # throws exception with reasonable error string
+ weight = float(weight) # throws exception with reasonable error string
units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
unit = units.get(sizespec[-1].lower(), None)
@@ -184,6 +188,7 @@ def parse_spec(text):
specs.append(spec)
return specs
+
def get_partitions(diskpath):
""" Return array of partition names for given disk """
dev = parted.getDevice(diskpath)
@@ -198,7 +203,7 @@ def get_partitions(diskpath):
def main():
""" Ansible module main method. """
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
disks=dict(required=True, type='str'),
force=dict(required=False, default="no", type='bool'),
@@ -227,14 +232,14 @@ def main():
except Exception, ex:
err = "Error creating partitions on " + disk + ": " + str(ex)
raise
- #module.fail_json(msg=err)
+ # module.fail_json(msg=err)
partitions += get_partitions(disk)
module.exit_json(changed=(changed_count > 0), ansible_facts={"partition_pool": partitions})
+
# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-order, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: E402,F403
main()
-
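
The DOCUMENTATION examples above describe how a weighted size spec divides a disk. A standalone sketch of that arithmetic, assuming MiB units as in the module's units dict (the helper name is illustrative, not from the patch):

def split_by_weight(total_mib, specs):
    # specs: list of (size_mib, weight) tuples, as parse_spec would yield.
    total_weight = float(sum(weight for _, weight in specs))
    counts = []
    for size, weight in specs:
        share = total_mib * (weight / total_weight)
        count = int(share / size)
        counts.append(count)
        total_mib -= count * size      # leftover rolls into later specs
        total_weight -= weight
    return counts

# 1 TiB disk, "100G:1,10G:1" -> [5, 52]: five 100 GiB partitions, and the
# 12 GiB remainder of the first share rolls into the 10 GiB bucket.
print(split_by_weight(1 << 20, [(100 << 10, 1), (10 << 10, 1)]))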
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 2e2430ee6..bedd23fe8 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -5,29 +5,6 @@
Custom filters for use in openshift-ansible
"""
-from ansible import errors
-from collections import Mapping
-from distutils.util import strtobool
-from distutils.version import LooseVersion
-from operator import itemgetter
-import OpenSSL.crypto
-import os
-import pdb
-import pkg_resources
-import re
-import json
-import yaml
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from urlparse import urlparse
-
-try:
- # ansible-2.2
- # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
- # ansible.module_utils._text.to_text should be used instead.
- from ansible.module_utils._text import to_text
-except ImportError:
- # ansible-2.1
- from ansible.utils.unicode import to_unicode as to_text
# Disabling too-many-public-methods, since filter methods are necessarily
# public
@@ -80,7 +57,6 @@ Example playbook usage:
return json_result
-
def filters(self):
""" returns a mapping of filters to methods """
return {
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 2cdb87dc1..e838eb2d4 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -628,10 +628,11 @@ an OpenShift Container Platform cluster
changed=False
)
+
######################################################################
# It's just the way we do things in Ansible. So disable this warning
#
# pylint: disable=wrong-import-position,import-error
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule # noqa: E402
if __name__ == '__main__':
main()
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 9ff738d14..4ed3e1f01 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -10,7 +10,7 @@ import shutil
import os.path
# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: F403
DOCUMENTATION = '''
@@ -40,7 +40,7 @@ class BinarySyncer(object):
self.bin_dir = '/usr/local/bin'
self.image = image
self.tag = tag
- self.temp_dir = None # TBD
+ self.temp_dir = None # TBD
def sync(self):
container_name = "openshift-cli-%s" % random.randint(1, 100000)
@@ -110,7 +110,7 @@ class BinarySyncer(object):
def main():
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
image=dict(required=True),
tag=dict(required=True),
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ad4b1e47b..95a9d668a 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -14,20 +14,35 @@ except ImportError:
# python3
import configparser as ConfigParser
+# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
+import errno
+import json
+import re
import io
import os
import yaml
-from distutils.util import strtobool
-from distutils.version import LooseVersion
import struct
import socket
+from distutils.util import strtobool
+from distutils.version import LooseVersion
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import * # noqa: F403
+from ansible.module_utils.facts import * # noqa: F403
+from ansible.module_utils.urls import * # noqa: F403
+from ansible.module_utils.six import iteritems, itervalues
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+from ansible.module_utils._text import to_native
+
+HAVE_DBUS = False
-HAVE_DBUS=False
try:
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
- HAVE_DBUS=True
+ HAVE_DBUS = True
except ImportError:
pass
@@ -58,6 +73,7 @@ def migrate_docker_facts(facts):
}
if 'docker' not in facts:
facts['docker'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -76,6 +92,7 @@ def migrate_docker_facts(facts):
return facts
+
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
@@ -86,6 +103,7 @@ def migrate_common_facts(facts):
}
if 'common' not in facts:
facts['common'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -93,6 +111,7 @@ def migrate_common_facts(facts):
facts['common'][param] = facts[role].pop(param)
return facts
+
def migrate_node_facts(facts):
""" Migrate facts from various roles into node """
params = {
@@ -100,6 +119,7 @@ def migrate_node_facts(facts):
}
if 'node' not in facts:
facts['node'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -125,7 +145,9 @@ def migrate_hosted_facts(facts):
facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
return facts
+
def migrate_admission_plugin_facts(facts):
+ """ Apply migrations for admission plugin facts """
if 'master' in facts:
if 'kube_admission_plugin_config' in facts['master']:
if 'admission_plugin_config' not in facts['master']:
@@ -139,6 +161,7 @@ def migrate_admission_plugin_facts(facts):
facts['master'].pop('kube_admission_plugin_config', None)
return facts
+
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
@@ -149,6 +172,7 @@ def migrate_local_facts(facts):
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
+
def first_ip(network):
""" Return the first IPv4 address in network
@@ -157,13 +181,14 @@ def first_ip(network):
Returns:
str: first IPv4 address
"""
- atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
- itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
+ itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
(address, netmask) = network.split('/')
netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
return itoa((atoi(address) & netmask_i) + 1)
+
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -201,11 +226,8 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames
- if (i is not None and isinstance(i, basestring)
- and re.match(ip_regex, i))]
- hosts = [i for i in hostnames
- if i is not None and i != '' and i not in ips]
+ ips = [i for i in hostnames if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)]
+ hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
for host in host_list:
@@ -225,11 +247,11 @@ def query_metadata(metadata_url, headers=None, expect_json=False):
Returns:
dict or list: metadata request result
"""
- result, info = fetch_url(module, metadata_url, headers=headers)
+ result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
if info['status'] != 200:
raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
- return module.from_json(to_native(result.read()))
+ return module.from_json(to_native(result.read())) # noqa: F405
else:
return [to_native(line.strip()) for line in result.readlines()]
@@ -390,7 +412,7 @@ def normalize_openstack_facts(metadata, facts):
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
- for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
+ for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
('public_hostname', 'public-hostname', 'public-ipv4')]:
try:
if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
@@ -431,6 +453,7 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
+
def set_flannel_facts_if_unset(facts):
""" Set flannel facts if not already present in facts dict
dict: the facts dict updated with the flannel facts if
@@ -448,6 +471,7 @@ def set_flannel_facts_if_unset(facts):
facts['common']['use_flannel'] = use_flannel
return facts
+
def set_nuage_facts_if_unset(facts):
""" Set nuage facts if not already present in facts dict
dict: the facts dict updated with the nuage facts if
@@ -465,6 +489,7 @@ def set_nuage_facts_if_unset(facts):
facts['common']['use_nuage'] = use_nuage
return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -482,6 +507,7 @@ def set_node_schedulability(facts):
facts['node']['schedulable'] = True
return facts
+
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -518,6 +544,7 @@ def set_selectors(facts):
return facts
+
def set_dnsmasq_facts_if_unset(facts):
""" Set dnsmasq facts if not already present in facts
Args:
@@ -537,6 +564,7 @@ def set_dnsmasq_facts_if_unset(facts):
return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -564,6 +592,7 @@ def set_project_cfg_facts_if_unset(facts):
return facts
+
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -590,6 +619,7 @@ def set_identity_providers_if_unset(facts):
return facts
+
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
@@ -649,7 +679,6 @@ def set_url_facts_if_unset(facts):
host,
ports[prefix]))
-
r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
facts['master'].setdefault('loopback_cluster_name', r_lhn)
@@ -665,6 +694,7 @@ def set_url_facts_if_unset(facts):
return facts
+
def set_aggregate_facts(facts):
""" Set aggregate facts
@@ -760,6 +790,7 @@ def set_etcd_facts_if_unset(facts):
return facts
+
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
@@ -840,6 +871,7 @@ def set_deployment_facts_if_unset(facts):
return facts
+
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -851,21 +883,23 @@ def set_version_facts_if_unset(facts):
"""
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
- version = get_openshift_version(facts)
- if version:
- facts['common']['version'] = version
+ openshift_version = get_openshift_version(facts)
+ if openshift_version:
+ version = LooseVersion(openshift_version)
+ facts['common']['version'] = openshift_version
+ facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
if deployment_type == 'origin':
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
- version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('1.4.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
else:
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
- version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('3.4.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
else:
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
@@ -878,7 +912,6 @@ def set_version_facts_if_unset(facts):
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
-
if version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
elif version_gte_3_3_or_1_3:
@@ -894,6 +927,7 @@ def set_version_facts_if_unset(facts):
return facts
+
def set_manageiq_facts_if_unset(facts):
""" Set manageiq facts. This currently includes common.use_manageiq.
@@ -914,6 +948,7 @@ def set_manageiq_facts_if_unset(facts):
return facts
+
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -924,6 +959,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
+ # pylint: disable=too-many-branches
if 'common' in facts:
use_sdn = facts['common']['use_openshift_sdn']
if not (use_sdn == '' or isinstance(use_sdn, bool)):
@@ -973,7 +1009,9 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
+
def set_nodename(facts):
+ """ set nodename """
if 'node' in facts and 'common' in facts:
if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
@@ -981,6 +1019,7 @@ def set_nodename(facts):
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
+
def migrate_oauth_template_facts(facts):
"""
Migrate an old oauth template fact to a newer format if it's present.
@@ -1000,6 +1039,7 @@ def migrate_oauth_template_facts(facts):
facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
return facts
+
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -1022,6 +1062,7 @@ def format_url(use_ssl, hostname, port, path=''):
url = urlunparse((scheme, netloc, path, '', '', ''))
return url
+
def get_current_config(facts):
""" Get current openshift config
@@ -1051,10 +1092,9 @@ def get_current_config(facts):
)
kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
- if (os.path.isfile('/usr/bin/openshift')
- and os.path.isfile(kubeconfig_path)):
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
try:
- _, output, _ = module.run_command(
+ _, output, _ = module.run_command( # noqa: F405
["/usr/bin/openshift", "ex", "config", "view", "-o",
"json", "--kubeconfig=%s" % kubeconfig_path],
check_rc=False
@@ -1085,6 +1125,7 @@ def get_current_config(facts):
return current_config
+
def build_kubelet_args(facts):
"""Build node kubelet_args
@@ -1129,6 +1170,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
#
# map() seems to be returning an itertools.imap object
# instead of a list. We cast it to a list ourselves.
+ # pylint: disable=unnecessary-lambda
labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
if labels_str != '':
kubelet_args['node-labels'] = labels_str
@@ -1139,6 +1181,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
return facts
+
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1160,6 +1203,7 @@ def build_controller_args(facts):
facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
return facts
+
def build_api_server_args(facts):
""" Build master api_server_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1181,6 +1225,7 @@ def build_api_server_args(facts):
facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
return facts
+
def is_service_running(service):
""" Queries systemd through dbus to see if the service is running """
service_running = False
@@ -1200,6 +1245,7 @@ def is_service_running(service):
return service_running
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
@@ -1210,9 +1256,10 @@ def get_version_output(binary, version_cmd):
cmd.append(item)
if os.path.isfile(cmd[0]):
- _, output, _ = module.run_command(cmd)
+ _, output, _ = module.run_command(cmd) # noqa: F405
return output
+
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
@@ -1225,6 +1272,7 @@ def get_docker_version_info():
}
return result
+
def get_hosted_registry_insecure():
""" Parses OPTIONS from /etc/sysconfig/docker to determine if the
registry is currently insecure.
@@ -1239,10 +1287,12 @@ def get_hosted_registry_insecure():
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
hosted_registry_insecure = True
+ # pylint: disable=bare-except
except:
pass
return hosted_registry_insecure
+
def get_openshift_version(facts):
""" Get current version of openshift on the host.
@@ -1264,7 +1314,7 @@ def get_openshift_version(facts):
return chomp_commit_offset(facts['common']['version'])
if os.path.isfile('/usr/bin/openshift'):
- _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
elif 'common' in facts and 'is_containerized' in facts['common']:
version = get_container_openshift_version(facts)
@@ -1273,7 +1323,7 @@ def get_openshift_version(facts):
# This can be very slow and may get re-run multiple times, so we only use this
# if other methods failed to find a version.
if not version and os.path.isfile('/usr/local/bin/openshift'):
- _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
return chomp_commit_offset(version)
@@ -1363,9 +1413,10 @@ def apply_provider_facts(facts, provider_facts):
facts['provider'] = provider_facts
return facts
+
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
-# pylint: disable=too-many-branches
+# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
""" Recursively merge facts dicts
@@ -1434,16 +1485,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
# The master count (int) can only increase unless it
# has been passed as a protected fact to overwrite.
- if key == 'master_count':
+                    if key == 'master_count' and new[key] is not None and new[key] != '':
if int(value) <= int(new[key]):
facts[key] = copy.deepcopy(new[key])
else:
- module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') # noqa: F405
# ha (bool) can not change unless it has been passed
# as a protected fact to overwrite.
if key == 'ha':
if safe_get_bool(value) != safe_get_bool(new[key]):
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
else:
facts[key] = value
# No other condition has been met. Overwrite the old fact
@@ -1463,6 +1516,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
facts[key] = copy.deepcopy(new[key])
return facts
+
def save_local_facts(filename, facts):
""" Save local facts
@@ -1478,7 +1532,7 @@ def save_local_facts(filename, facts):
if exception.errno != errno.EEXIST: # but it is okay if it is already there
raise # pass any other exceptions up the chain
with open(filename, 'w') as fact_file:
- fact_file.write(module.jsonify(facts))
+ fact_file.write(module.jsonify(facts)) # noqa: F405
os.chmod(filename, 0o600)
except (IOError, OSError) as ex:
raise OpenShiftFactsFileWriteError(
@@ -1514,6 +1568,7 @@ def get_local_facts_from_file(filename):
return local_facts
+
def sort_unique(alist):
""" Sorts and de-dupes a list
@@ -1531,6 +1586,7 @@ def sort_unique(alist):
return out
+
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1541,6 +1597,7 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
+
def set_proxy_facts(facts):
""" Set global proxy facts and promote defaults from http_proxy, https_proxy,
no_proxy to the more specific builddefaults and builddefaults_git vars.
@@ -1556,13 +1613,11 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and \
- isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], basestring):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
- if 'generate_no_proxy_hosts' in common and \
- safe_get_bool(common['generate_no_proxy_hosts']):
+ if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
@@ -1593,13 +1648,14 @@ def set_proxy_facts(facts):
# into admission_plugin_config
if 'admission_plugin_config' not in facts['master']:
facts['master']['admission_plugin_config'] = dict()
- if 'config' in builddefaults and ('http_proxy' in builddefaults or \
- 'https_proxy' in builddefaults):
+ if 'config' in builddefaults and ('http_proxy' in builddefaults or
+ 'https_proxy' in builddefaults):
facts['master']['admission_plugin_config'].update(builddefaults['config'])
facts['builddefaults'] = builddefaults
return facts
+
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1671,6 +1727,7 @@ def set_container_facts_if_unset(facts):
return facts
+
def set_installed_variant_rpm_facts(facts):
""" Set RPM facts of installed variant
Args:
@@ -1685,7 +1742,7 @@ def set_installed_variant_rpm_facts(facts):
['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
['tuned-profiles-%s-node' % base_rpm]
for rpm in variant_rpms:
- exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
+ exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405
if exit_code == 0:
installed_rpms.append(rpm)
@@ -1693,7 +1750,6 @@ def set_installed_variant_rpm_facts(facts):
return facts
-
class OpenShiftFactsInternalError(Exception):
"""Origin Facts Error"""
pass
@@ -1761,12 +1817,12 @@ class OpenShiftFacts(object):
try:
# ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
- self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
+ self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
for (k, v) in self.system_facts.items():
self.system_facts["ansible_%s" % k.replace('-', '_')] = v
except UnboundLocalError:
# ansible-2.2
- self.system_facts = get_all_facts(module)['ansible_facts']
+ self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite,
@@ -1799,7 +1855,6 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
roles = local_facts.keys()
-
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
deployment_type = local_facts['common']['deployment_type']
else:
@@ -1854,7 +1909,7 @@ class OpenShiftFacts(object):
"""
defaults = {}
ip_addr = self.system_facts['ansible_default_ipv4']['address']
- exit_code, output, _ = module.run_command(['hostname', '-f'])
+ exit_code, output, _ = module.run_command(['hostname', '-f']) # noqa: F405
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
@@ -1873,22 +1928,6 @@ class OpenShiftFacts(object):
debug_level=2)
if 'master' in roles:
- scheduler_predicates = [
- {"name": "MatchNodeSelector"},
- {"name": "PodFitsResources"},
- {"name": "PodFitsPorts"},
- {"name": "NoDiskConflict"},
- {"name": "NoVolumeZoneConflict"},
- {"name": "MaxEBSVolumeCount"},
- {"name": "MaxGCEPDVolumeCount"},
- {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
- ]
- scheduler_priorities = [
- {"name": "LeastRequestedPriority", "weight": 1},
- {"name": "SelectorSpreadPriority", "weight": 1},
- {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
- ]
-
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
controllers_port='8444',
console_use_ssl=True,
@@ -1905,8 +1944,6 @@ class OpenShiftFacts(object):
access_token_max_seconds=86400,
auth_token_max_seconds=500,
oauth_grant_method='auto',
- scheduler_predicates=scheduler_predicates,
- scheduler_priorities=scheduler_priorities,
dynamic_provisioning_enabled=True,
max_requests_inflight=500)
@@ -1930,7 +1967,7 @@ class OpenShiftFacts(object):
defaults['docker'] = docker
if 'clock' in roles:
- exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+ exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony']) # noqa: F405
chrony_installed = bool(exit_code == 0)
defaults['clock'] = dict(
enabled=True,
@@ -2015,7 +2052,7 @@ class OpenShiftFacts(object):
# TODO: this is not exposed through module_utils/facts.py in ansible,
# need to create PR for ansible to expose it
- bios_vendor = get_file_content(
+ bios_vendor = get_file_content( # noqa: F405
'/sys/devices/virtual/dmi/id/bios_vendor'
)
if bios_vendor == 'Google':
@@ -2030,8 +2067,7 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
- elif (virt_type == 'xen' and virt_role == 'guest'
- and re.match(r'.*\.amazon$', product_version)):
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
@@ -2092,7 +2128,7 @@ class OpenShiftFacts(object):
# Determine if any of the provided variable structures match the fact.
matching_structure = None
- if openshift_env_structures != None:
+ if openshift_env_structures is not None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
@@ -2116,7 +2152,7 @@ class OpenShiftFacts(object):
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
- #pylint: disable=too-many-branches, too-many-locals
+ # pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
additive_facts_to_overwrite=None,
openshift_env=None,
@@ -2144,7 +2180,7 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env != None:
+ if openshift_env != {} and openshift_env is not None:
for fact, value in iteritems(openshift_env):
oo_env_facts = dict()
current_level = oo_env_facts
@@ -2173,7 +2209,7 @@ class OpenShiftFacts(object):
if 'docker' in new_local_facts:
# remove duplicate and empty strings from registry lists
- for cat in ['additional', 'blocked', 'insecure']:
+ for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
@@ -2190,7 +2226,7 @@ class OpenShiftFacts(object):
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
changed = True
- if not module.check_mode:
+ if not module.check_mode: # noqa: F405
save_local_facts(self.filename, new_local_facts)
self.changed = changed
@@ -2223,10 +2259,10 @@ class OpenShiftFacts(object):
invalid_facts = self.validate_master_facts(facts, invalid_facts)
if invalid_facts:
msg = 'Invalid facts detected:\n'
+ # pylint: disable=consider-iterating-dictionary
for key in invalid_facts.keys():
msg += '{0}: {1}\n'.format(key, invalid_facts[key])
- module.fail_json(msg=msg,
- changed=self.changed)
+ module.fail_json(msg=msg, changed=self.changed) # noqa: F405
# disabling pylint errors for line-too-long since we're dealing
# with best effort reduction of error messages here.
@@ -2278,13 +2314,14 @@ class OpenShiftFacts(object):
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
+
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
@@ -2299,18 +2336,18 @@ def main():
)
if not HAVE_DBUS:
- module.fail_json(msg="This module requires dbus python bindings")
+ module.fail_json(msg="This module requires dbus python bindings") # noqa: F405
- module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
- module.params['gather_timeout'] = 10
- module.params['filter'] = '*'
+ module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
+ module.params['gather_timeout'] = 10 # noqa: F405
+ module.params['filter'] = '*' # noqa: F405
- role = module.params['role']
- local_facts = module.params['local_facts']
- additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
- openshift_env = module.params['openshift_env']
- openshift_env_structures = module.params['openshift_env_structures']
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
+ role = module.params['role'] # noqa: F405
+ local_facts = module.params['local_facts'] # noqa: F405
+ additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
+ openshift_env = module.params['openshift_env'] # noqa: F405
+ openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
+ protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
@@ -2322,24 +2359,15 @@ def main():
openshift_env_structures,
protected_facts_to_overwrite)
- file_params = module.params.copy()
+ file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
- file_args = module.load_file_common_arguments(file_params)
- changed = module.set_fs_attributes_if_different(file_args,
+ file_args = module.load_file_common_arguments(file_params) # noqa: F405
+ changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
openshift_facts.changed)
- return module.exit_json(changed=changed,
+ return module.exit_json(changed=changed, # noqa: F405
ansible_facts=openshift_facts.facts)
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.facts import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.six import iteritems, itervalues
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import b
if __name__ == '__main__':
main()
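
The set_version_facts_if_unset hunk above now derives a short_version fact from the full version via LooseVersion. A quick sketch of that derivation, assuming a typical four-component RPM version string:

from distutils.version import LooseVersion

openshift_version = '3.4.0.40'   # illustrative value
version = LooseVersion(openshift_version)
short_version = '.'.join(str(x) for x in version.version[0:2])
print(short_version)             # -> '3.4'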
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 7c1d5a212..4dce63630 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,17 +1,14 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
scheduler_config:
kind: Policy
apiVersion: v1
- predicates: "{{ openshift.master.scheduler_predicates }}"
- priorities: "{{ openshift.master.scheduler_priorities }}"
+ predicates: "{{ openshift_master_scheduler_predicates }}"
+ priorities: "{{ openshift_master_scheduler_priorities }}"
openshift_master_valid_grant_methods:
- auto
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
new file mode 100644
index 000000000..05b62a7a6
--- /dev/null
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -0,0 +1,96 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements
+
+ def run(self, terms, variables=None, regions_enabled=True, **kwargs):
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+ if 'master' not in variables['openshift']:
+ raise AnsibleError("This lookup module is meant to be run against an OpenShift master host only")
+
+ if 'openshift_master_scheduler_predicates' in variables:
+ return variables['openshift_master_scheduler_predicates']
+ elif 'scheduler_predicates' in variables['openshift']['master']:
+ return variables['openshift']['master']['scheduler_predicates']
+ else:
+ predicates = []
+
+ if 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+ if deployment_type not in ['origin', 'openshift-enterprise']:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+        elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+
+ if deployment_type == 'openshift-enterprise':
+ # convert short_version to origin short_version
+ short_version = re.sub('^3.', '1.', short_version)
+
+ if short_version in ['1.1', '1.2']:
+ predicates.append({'name': 'PodFitsHostPorts'})
+ predicates.append({'name': 'PodFitsResources'})
+
+ # applies to all known versions
+ predicates.append({'name': 'NoDiskConflict'})
+
+ # only 1.1 didn't include NoVolumeZoneConflict
+ if short_version != '1.1':
+ predicates.append({'name': 'NoVolumeZoneConflict'})
+
+ if short_version in ['1.1', '1.2']:
+ predicates.append({'name': 'MatchNodeSelector'})
+ predicates.append({'name': 'Hostname'})
+
+ if short_version != '1.1':
+ predicates.append({'name': 'MaxEBSVolumeCount'})
+ predicates.append({'name': 'MaxGCEPDVolumeCount'})
+
+ if short_version not in ['1.1', '1.2']:
+ predicates.append({'name': 'GeneralPredicates'})
+ predicates.append({'name': 'PodToleratesNodeTaints'})
+ predicates.append({'name': 'CheckNodeMemoryPressure'})
+
+ if short_version not in ['1.1', '1.2', '1.3']:
+ predicates.append({'name': 'CheckNodeDiskPressure'})
+ predicates.append({'name': 'MatchInterPodAffinity'})
+
+ if regions_enabled:
+ region_predicate = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+ }
+ predicates.append(region_predicate)
+
+ return predicates
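
A hedged usage sketch for the lookup above, mirroring the sys.path setup the test files below use (the path assumes the repository root as working directory):

import os
import sys

sys.path.insert(0, os.path.abspath('roles/openshift_master_facts/lookup_plugins'))
from openshift_master_facts_default_predicates import LookupModule

facts = {
    'openshift': {
        'master': {},
        'common': {'deployment_type': 'origin', 'short_version': '1.4'},
    }
}
# Returns the nine 1.4-era predicates (DEFAULT_PREDICATES_1_4 in the tests below).
print(LookupModule().run(None, variables=facts, regions_enabled=False))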
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
new file mode 100644
index 000000000..3329d26b0
--- /dev/null
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -0,0 +1,87 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches
+
+ def run(self, terms, variables=None, zones_enabled=True, **kwargs):
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+ if 'master' not in variables['openshift']:
+ raise AnsibleError("This lookup module is meant to be run against an OpenShift master host only")
+
+ if 'openshift_master_scheduler_priorities' in variables:
+ return variables['openshift_master_scheduler_priorities']
+ elif 'scheduler_priorities' in variables['openshift']['master']:
+ return variables['openshift']['master']['scheduler_priorities']
+ else:
+ priorities = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ]
+
+ if 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+ if deployment_type not in ['origin', 'openshift-enterprise']:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+        elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+
+ if deployment_type == 'openshift-enterprise':
+ # convert short_version to origin short_version
+ short_version = re.sub('^3.', '1.', short_version)
+
+ if short_version == '1.4':
+ priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
+
+ # only 1.1 didn't include NodeAffinityPriority
+ if short_version != '1.1':
+ priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
+
+ if short_version not in ['1.1', '1.2']:
+ priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
+
+ if short_version not in ['1.1', '1.2', '1.3']:
+ priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
+
+ if zones_enabled:
+ zone_priority = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+ }
+ priorities.append(zone_priority)
+
+ return priorities
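
The same lookup pattern applies to the priorities plugin; the one extra wrinkle shared by both plugins is the enterprise-to-origin version mapping, sketched here:

import re

short_version = '3.3'                        # openshift-enterprise release
short_version = re.sub('^3.', '1.', short_version)
print(short_version)                         # -> '1.3'
# For '1.3' the plugin returns the three base priorities plus
# NodeAffinityPriority and TaintTolerationPriority, each with weight 1.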
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 1f27a2c1d..170861484 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -64,8 +64,6 @@
master_count: "{{ openshift_master_count | default(None) }}"
controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
- scheduler_predicates: "{{ openshift_master_scheduler_predicates | default(None) }}"
- scheduler_priorities: "{{ openshift_master_scheduler_priorities | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
@@ -79,3 +77,38 @@
audit_config: "{{ openshift_master_audit_config | default(None) }}"
metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}"
scheduler_args: "{{ openshift_master_scheduler_args | default(None) }}"
+
+- name: Determine if scheduler config present
+ stat:
+ path: "{{ openshift_master_scheduler_conf }}"
+ register: scheduler_config_stat
+
+- block:
+ - set_fact:
+ openshift_master_scheduler_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
+ when: "{{ openshift_master_scheduler_predicates is not defined }}"
+
+ - set_fact:
+ openshift_master_scheduler_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
+ when: "{{ openshift_master_scheduler_priorities is not defined }}"
+ when: "{{ not scheduler_config_stat.stat.exists }}"
+
+- block:
+ - name: Retrieve current scheduler config
+ slurp:
+ src: "{{ openshift_master_scheduler_conf }}"
+ register: current_scheduler_config
+
+ - fail:
+ msg: "Could not decode scheduler config"
+ when: "{{ (current_scheduler_config.content | b64decode | from_json).apiVersion | default(none) != 'v1' }}"
+
+ - set_fact:
+ openshift_master_scheduler_predicates: "{{ (current_scheduler_config.content | b64decode | from_json).predicates }}"
+ when: "{{ openshift_master_scheduler_predicates is not defined }}"
+
+ - set_fact:
+ openshift_master_scheduler_priorities: "{{ (current_scheduler_config.content | b64decode | from_json).priorities }}"
+ when: "{{ openshift_master_scheduler_priorities is not defined }}"
+
+ when: "{{ scheduler_config_stat.stat.exists }}"
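
A Python equivalent of the slurp | b64decode | from_json chain in the block above, assuming the default origin scheduler config path (the real path comes from openshift_master_scheduler_conf):

import base64
import json

with open('/etc/origin/master/scheduler.json', 'rb') as f:
    content = base64.b64encode(f.read())      # what slurp registers

config = json.loads(base64.b64decode(content))
assert config.get('apiVersion') == 'v1'       # mirrors the fail task above
predicates = config['predicates']
priorities = config['priorities']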
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
new file mode 100644
index 000000000..6f2a916d0
--- /dev/null
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -0,0 +1,174 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+from nose.tools import raises, assert_equal
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
+
+from openshift_master_facts_default_predicates import LookupModule # noqa: E402
+
+DEFAULT_PREDICATES_1_1 = [
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'Hostname'}
+]
+
+DEFAULT_PREDICATES_1_2 = [
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'Hostname'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'}
+]
+
+DEFAULT_PREDICATES_1_3 = [
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'}
+]
+
+DEFAULT_PREDICATES_1_4 = [
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'MatchInterPodAffinity'}
+]
+
+REGION_PREDICATE = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+}
+
+
+class TestOpenShiftMasterFactsDefaultPredicates(object):
+ def setUp(self):
+ self.lookup = LookupModule()
+ self.default_facts = {
+ 'openshift': {
+ 'master': {},
+ 'common': {}
+ }
+ }
+
+ @raises(AnsibleError)
+ def test_missing_short_version_and_missing_openshift_release(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ def check_defaults(self, release, deployment_type, default_predicates,
+ regions_enabled, short_version):
+ facts = copy.deepcopy(self.default_facts)
+ if short_version:
+ facts['openshift']['common']['short_version'] = release
+ else:
+ facts['openshift_release'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def test_openshift_release_defaults(self):
+ test_vars = [
+ ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
+ ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
+ ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
+ ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
+ ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
+ ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
+ ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4)
+ ]
+
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in test_vars:
+ for prepend_v in (True, False):
+                    release_str = 'v' + release if prepend_v else release
+                    yield self.check_defaults, release_str, deployment_type, default_predicates, regions_enabled, False
+
+ def test_short_version_defaults(self):
+ test_vars = [
+ ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
+ ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
+ ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
+ ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
+ ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
+ ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
+ ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4)
+ ]
+ for regions_enabled in (True, False):
+ for short_version, deployment_type, default_predicates in test_vars:
+ yield self.check_defaults, short_version, deployment_type, default_predicates, regions_enabled, True
+
+ @raises(AnsibleError)
+ def test_unknown_deployment_types(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '1.1'
+ facts['openshift']['common']['deployment_type'] = 'bogus'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_deployment_type(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '10.10'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def testMissingOpenShiftFacts(self):
+ facts = {}
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def testMissingMasterRole(self):
+ facts = {'openshift': {}}
+ self.lookup.run(None, variables=facts)
+
+ def testPreExistingPredicates(self):
+ facts = {
+ 'openshift': {
+ 'master': {
+ 'scheduler_predicates': [
+ {'name': 'pred_a'},
+ {'name': 'pred_b'}
+ ]
+ }
+ }
+ }
+ result = self.lookup.run(None, variables=facts)
+ assert_equal(result, facts['openshift']['master']['scheduler_predicates'])
+
+ def testDefinedPredicates(self):
+ facts = {
+ 'openshift': {'master': {}},
+ 'openshift_master_scheduler_predicates': [
+ {'name': 'pred_a'},
+ {'name': 'pred_b'}
+ ]
+ }
+ result = self.lookup.run(None, variables=facts)
+ assert_equal(result, facts['openshift_master_scheduler_predicates'])
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
new file mode 100644
index 000000000..d63f06904
--- /dev/null
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -0,0 +1,164 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+from nose.tools import raises, assert_equal
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
+
+from openshift_master_facts_default_priorities import LookupModule # noqa: E402
+
+DEFAULT_PRIORITIES_1_1 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_2 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_3 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_4 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+]
+
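+# Priority the lookup appends to the defaults when zones_enabled is set.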
+ZONE_PRIORITY = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+}
+
+
+class TestOpenShiftMasterFactsDefaultPriorities(object):
+ def setUp(self):
+ self.lookup = LookupModule()
+ self.default_facts = {
+ 'openshift': {
+ 'master': {},
+ 'common': {}
+ }
+ }
+
+ @raises(AnsibleError)
+ def test_missing_short_version_and_missing_openshift_release(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ def check_defaults(self, release, deployment_type, default_priorities,
+ zones_enabled, short_version):
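+        ''' Run the lookup for the given release (supplied either as
+        openshift_release or as short_version) and assert the defaults. '''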
+ facts = copy.deepcopy(self.default_facts)
+ if short_version:
+ facts['openshift']['common']['short_version'] = release
+ else:
+ facts['openshift_release'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def test_openshift_release_defaults(self):
+ test_vars = [
+ ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
+ ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
+ ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
+ ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
+ ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
+ ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
+ ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
+ ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+ ]
+
+ for zones_enabled in (True, False):
+ for release, deployment_type, default_priorities in test_vars:
+ for prepend_v in (True, False):
+                    # build the prefixed variant without mutating release, so
+                    # the bare version is still exercised on the next pass
+                    release_str = 'v' + release if prepend_v else release
+                    yield self.check_defaults, release_str, deployment_type, default_priorities, zones_enabled, False
+
+ def test_short_version_defaults(self):
+ test_vars = [
+ ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
+ ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
+ ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
+ ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
+ ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
+ ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
+ ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
+ ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+ ]
+ for zones_enabled in (True, False):
+ for short_version, deployment_type, default_priorities in test_vars:
+ yield self.check_defaults, short_version, deployment_type, default_priorities, zones_enabled, True
+
+ @raises(AnsibleError)
+ def test_unknown_deployment_types(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '1.1'
+ facts['openshift']['common']['deployment_type'] = 'bogus'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_deployment_type(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '10.10'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_openshift_facts(self):
+ facts = {}
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_master_role(self):
+ facts = {'openshift': {}}
+ self.lookup.run(None, variables=facts)
+
+ def test_pre_existing_priorities(self):
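+        # priorities already stored in the facts take precedence over defaults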
+ facts = {
+ 'openshift': {
+ 'master': {
+ 'scheduler_priorities': [
+ {'name': 'pri_a', 'weight': 1},
+ {'name': 'pri_b', 'weight': 1}
+ ]
+ }
+ }
+ }
+ result = self.lookup.run(None, variables=facts)
+ assert_equal(result, facts['openshift']['master']['scheduler_priorities'])
+
+    def test_defined_priorities(self):
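+        # an explicit openshift_master_scheduler_priorities var is returned as-is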
+ facts = {
+ 'openshift': {'master': {}},
+ 'openshift_master_scheduler_priorities': [
+ {'name': 'pri_a', 'weight': 1},
+ {'name': 'pri_b', 'weight': 1}
+ ]
+ }
+ result = self.lookup.run(None, variables=facts)
+ assert_equal(result, facts['openshift_master_scheduler_priorities'])
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index 406d50c24..3f328dcfe 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -1,3 +1,8 @@
+---
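+# Paths to the master configuration and scheduler policy file, derived from
+# the cluster's config_base fact.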
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+
builddefaults_yaml:
BuildDefaults:
configuration:
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 37bb16f35..b60e52dfe 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
-from subprocess import call, check_output
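+# subprocess is imported as a module so call sites read explicitly as
+# subprocess.call / subprocess.check_output.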
+import subprocess
DOCUMENTATION = '''
---
@@ -29,7 +29,10 @@ class IpTablesAddRuleError(IpTablesError):
class IpTablesRemoveRuleError(IpTablesError):
- pass
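+    # Carry the affected chain on the exception, mirroring the other chain
+    # error classes, so failures can name the chain involved.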
+    def __init__(self, chain, msg, cmd, exit_code, output):  # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ super(IpTablesRemoveRuleError, self).__init__(msg, cmd, exit_code,
+ output)
+ self.chain = chain
class IpTablesSaveError(IpTablesError):
@@ -37,14 +40,14 @@ class IpTablesSaveError(IpTablesError):
class IpTablesCreateChainError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
@@ -53,7 +56,7 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# TODO: implement rollbacks for any events that were successful and an
# exception was thrown later. For example, when the chain is created
# successfully, but the add/remove rule fails.
-class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
+class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def __init__(self, module):
self.module = module
self.ip_version = module.params['ip_version']
@@ -68,8 +71,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def save(self):
try:
- self.output.append(check_output(self.save_cmd,
- stderr=subprocess.STDOUT))
+ self.output.append(subprocess.check_output(self.save_cmd, stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
@@ -92,7 +94,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
cmd = self.cmd + ['-A'] + rule
try:
- self.output.append(check_output(cmd))
+ self.output.append(subprocess.check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
@@ -112,7 +114,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
cmd = self.cmd + ['-D'] + rule
try:
- self.output.append(check_output(cmd))
+ self.output.append(subprocess.check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
@@ -123,7 +125,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
- return True if call(check_cmd) == 0 else False
+        return subprocess.call(check_cmd) == 0
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
@@ -136,7 +138,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
try:
cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
- output = check_output(cmd, stderr=subprocess.STDOUT)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
input_rules = [s.split() for s in to_native(output).split('\n')]
@@ -155,8 +157,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
# Naively assume that if the last row is a REJECT or DROP rule,
# then we can insert our rule right before it, otherwise we
# assume that we can just append the rule.
- if (last_rule_num and last_rule_target
- and last_rule_target in ['REJECT', 'DROP']):
+        if last_rule_num and last_rule_target in ('REJECT', 'DROP'):
# insert rule
cmd = self.cmd + ['-I', self.jump_rule_chain,
str(last_rule_num)]
@@ -164,7 +165,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
- output = check_output(cmd, stderr=subprocess.STDOUT)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
self.changed = True
self.output.append(output)
self.save()
@@ -192,8 +193,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
try:
cmd = self.cmd + ['-N', self.chain]
- self.output.append(check_output(cmd,
- stderr=subprocess.STDOUT))
+ self.output.append(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
@@ -203,26 +203,26 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
- )
+ )
def jump_rule_exists(self):
cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
- return True if call(cmd) == 0 else False
+        return subprocess.call(cmd) == 0
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
- return True if call(cmd) == 0 else False
+        return subprocess.call(cmd) == 0
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
- def gen_save_cmd(self): # pylint: disable=no-self-use
+ def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
def main():
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
name=dict(required=True),
action=dict(required=True, choices=['add', 'remove',
@@ -266,9 +266,9 @@ def main():
output=iptables_manager.output)
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import * # noqa: F403,E402
+from ansible.module_utils._text import to_native # noqa: E402
if __name__ == '__main__':
main()