diff options
Diffstat (limited to 'roles')
 -rw-r--r--  roles/ansible/tasks/config.yml                                            |   8
 -rw-r--r--  roles/ansible/tasks/main.yml (renamed from roles/ansible/tasks/main.yaml) |   4
 -rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml                          |  34
 -rw-r--r--  roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2             |  15
 -rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py                          | 966
 -rw-r--r--  roles/openshift_facts/tasks/main.yml                                      |   6
 -rw-r--r--  roles/openshift_master/defaults/main.yml                                  |   5
 7 files changed, 679 insertions(+), 359 deletions(-)
| diff --git a/roles/ansible/tasks/config.yml b/roles/ansible/tasks/config.yml new file mode 100644 index 000000000..5e361429b --- /dev/null +++ b/roles/ansible/tasks/config.yml @@ -0,0 +1,8 @@ +--- +- name: modify ansible.cfg +  lineinfile: +    dest: /etc/ansible/ansible.cfg +    backrefs: yes +    regexp: "^#?({{ item.option }})( *)=" +    line: '\1\2= {{ item.value }}' +  with_items: cfg_options diff --git a/roles/ansible/tasks/main.yaml b/roles/ansible/tasks/main.yml index 67a04b919..5d20a3b35 100644 --- a/roles/ansible/tasks/main.yaml +++ b/roles/ansible/tasks/main.yml @@ -5,3 +5,7 @@    yum:      pkg: ansible      state: installed + +- include: config.yml +  vars: +    cfg_options: "{{ ans_config }}" diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index dddfe24e3..5fe77e38b 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -24,22 +24,20 @@      owner: root      group: libra_ops -- lineinfile: -    dest: /etc/ansible/ansible.cfg -    backrefs: yes -    regexp: '^(hostfile|inventory)( *)=' -    line: '\1\2= /etc/ansible/inventory' +# This cron uses the above location to call its job +- name: Cron to keep cache fresh +  cron: +    name: 'multi_ec2_inventory' +    minute: '*/10' +    job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null' +  when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache -- name: setting ec2.ini destination_format -  lineinfile: -    dest: /usr/share/ansible/inventory/aws/ec2.ini -    regexp: '^destination_format *=' -    line: "destination_format = {{ oo_ec2_destination_format }}" -  when: oo_ec2_destination_format is defined - -- name: setting ec2.ini destination_format_tags -  lineinfile: -    dest: /usr/share/ansible/inventory/aws/ec2.ini -    regexp: '^destination_format_tags *=' -    line: "destination_format_tags = {{ oo_ec2_destination_format_tags }}" -  when: 
oo_ec2_destination_format_tags is defined +- name: Set cache location +  file: +    state: directory +    dest: "{{ oo_inventory_cache_location | dirname }}" +    owner: root +    group: libra_ops +    recurse: yes +    mode: '2750' +  when: oo_inventory_cache_location is defined diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 index 23dfe73b8..8228ab915 100644 --- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 +++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 @@ -1,11 +1,26 @@  # multi ec2 inventory configs  cache_max_age: {{ oo_inventory_cache_max_age }} +cache_location: {{ oo_inventory_cache_location | default('~/.ansible/tmp/multi_ec2_inventory.cache') }}  accounts:  {% for account in oo_inventory_accounts %}    - name: {{ account.name }}      provider: {{ account.provider }} +    provider_config: +{%  for section, items in account.provider_config.items() %} +      {{ section }}: +{%    for property, value in items.items() %} +        {{ property }}: {{ value }} +{%    endfor %} +{% endfor %}      env_vars:        AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}        AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }} +{% if account.all_group is defined and account.hostvars is defined%} +    all_group: {{ account.all_group }} +    hostvars: +{%    for property, value in account.hostvars.items() %} +      {{ property }}: {{ value }} +{%    endfor %} +{% endif %}  {% endfor %} diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 1e0d5c605..9c2657ff2 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1,6 +1,11 @@  #!/usr/bin/python  # -*- coding: utf-8 -*-  # vim: expandtab:tabstop=4:shiftwidth=4 +# disable pylint checks +# temporarily disabled until items can be addressed: +#   
fixme - until all TODO comments have been addressed +# pylint:disable=fixme +"""Ansible module for retrieving and setting openshift related facts"""  DOCUMENTATION = '''  --- @@ -15,294 +20,645 @@ EXAMPLES = '''  import ConfigParser  import copy -class OpenShiftFactsUnsupportedRoleError(Exception): -    pass -class OpenShiftFactsFileWriteError(Exception): -    pass +def hostname_valid(hostname): +    """ Test if specified hostname should be considered valid -class OpenShiftFactsMetadataUnavailableError(Exception): -    pass +        Args: +            hostname (str): hostname to test +        Returns: +            bool: True if valid, otherwise False +    """ +    if (not hostname or +            hostname.startswith('localhost') or +            hostname.endswith('localdomain') or +            len(hostname.split('.')) < 2): +        return False -class OpenShiftFacts(): -    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns'] +    return True -    def __init__(self, role, filename, local_facts): -        self.changed = False -        self.filename = filename -        if role not in self.known_roles: -            raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role) -        self.role = role -        self.facts = self.generate_facts(local_facts) -    def generate_facts(self, local_facts): -        local_facts = self.init_local_facts(local_facts) -        roles = local_facts.keys() +def choose_hostname(hostnames=None, fallback=''): +    """ Choose a hostname from the provided hostnames -        defaults = self.get_defaults(roles) -        provider_facts = self.init_provider_facts() -        facts = self.apply_provider_facts(defaults, provider_facts, roles) +        Given a list of hostnames and a fallback value, choose a hostname to +        use. This function will prefer fqdns if they exist (excluding any that +        begin with localhost or end with localdomain) over ip addresses. 
-        facts = self.merge_facts(facts, local_facts) -        facts['current_config'] = self.current_config(facts) -        self.set_url_facts_if_unset(facts) -        return dict(openshift=facts) +        Args: +            hostnames (list): list of hostnames +            fallback (str): default value to set if hostnames does not contain +                            a valid hostname +        Returns: +            str: chosen hostname +    """ +    hostname = fallback +    if hostnames is None: +        return hostname + +    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z' +    ips = [i for i in hostnames +           if (i is not None and isinstance(i, basestring) +               and re.match(ip_regex, i))] +    hosts = [i for i in hostnames +             if i is not None and i != '' and i not in ips] + +    for host_list in (hosts, ips): +        for host in host_list: +            if hostname_valid(host): +                return host + +    return hostname + + +def query_metadata(metadata_url, headers=None, expect_json=False): +    """ Return metadata from the provided metadata_url + +        Args: +            metadata_url (str): metadata url +            headers (dict): headers to set for metadata request +            expect_json (bool): does the metadata_url return json +        Returns: +            dict or list: metadata request result +    """ +    result, info = fetch_url(module, metadata_url, headers=headers) +    if info['status'] != 200: +        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") +    if expect_json: +        return module.from_json(result.read()) +    else: +        return [line.strip() for line in result.readlines()] + + +def walk_metadata(metadata_url, headers=None, expect_json=False): +    """ Walk the metadata tree and return a dictionary of the entire tree + +        Args: +            metadata_url (str): metadata url +            headers (dict): headers to set for metadata request +            expect_json 
(bool): does the metadata_url return json +        Returns: +            dict: the result of walking the metadata tree +    """ +    metadata = dict() + +    for line in query_metadata(metadata_url, headers, expect_json): +        if line.endswith('/') and not line == 'public-keys/': +            key = line[:-1] +            metadata[key] = walk_metadata(metadata_url + line, +                                          headers, expect_json) +        else: +            results = query_metadata(metadata_url + line, headers, +                                     expect_json) +            if len(results) == 1: +                # disable pylint maybe-no-member because overloaded use of +                # the module name causes pylint to not detect that results +                # is an array or hash +                # pylint: disable=maybe-no-member +                metadata[line] = results.pop() +            else: +                metadata[line] = results +    return metadata -    def set_url_facts_if_unset(self, facts): -        if 'master' in facts: -            for (url_var, use_ssl, port, default) in [ -                    ('api_url', -                        facts['master']['api_use_ssl'], -                        facts['master']['api_port'], -                        facts['common']['hostname']), -                    ('public_api_url', -                        facts['master']['api_use_ssl'], -                        facts['master']['api_port'], -                        facts['common']['public_hostname']), -                    ('console_url', -                        facts['master']['console_use_ssl'], -                        facts['master']['console_port'], -                        facts['common']['hostname']), -                    ('public_console_url' 'console_use_ssl', -                        facts['master']['console_use_ssl'], -                        facts['master']['console_port'], -                        facts['common']['public_hostname'])]: -               
 if url_var not in facts['master']: -                    scheme = 'https' if use_ssl else 'http' -                    netloc = default -                    if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'): -                        netloc = "%s:%s" % (netloc, port) -                    facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', '')) - - -    # Query current OpenShift config and return a dictionary containing -    # settings that may be valuable for determining actions that need to be -    # taken in the playbooks/roles -    def current_config(self, facts): -        current_config=dict() -        roles = [ role for role in facts if role not in ['common','provider'] ] -        for role in roles: -            if 'roles' in current_config: -                current_config['roles'].append(role) +def get_provider_metadata(metadata_url, supports_recursive=False, +                          headers=None, expect_json=False): +    """ Retrieve the provider metadata + +        Args: +            metadata_url (str): metadata url +            supports_recursive (bool): does the provider metadata api support +                                       recursion +            headers (dict): headers to set for metadata request +            expect_json (bool): does the metadata_url return json +        Returns: +            dict: the provider metadata +    """ +    try: +        if supports_recursive: +            metadata = query_metadata(metadata_url, headers, +                                      expect_json) +        else: +            metadata = walk_metadata(metadata_url, headers, +                                     expect_json) +    except OpenShiftFactsMetadataUnavailableError: +        metadata = None +    return metadata + + +def normalize_gce_facts(metadata, facts): +    """ Normalize gce facts + +        Args: +            metadata (dict): provider metadata +            facts (dict): facts to update +        
Returns: +            dict: the result of adding the normalized metadata to the provided +                  facts dict +    """ +    for interface in metadata['instance']['networkInterfaces']: +        int_info = dict(ips=[interface['ip']], network_type='gce') +        int_info['public_ips'] = [ac['externalIp'] for ac +                                  in interface['accessConfigs']] +        int_info['public_ips'].extend(interface['forwardedIps']) +        _, _, network_id = interface['network'].rpartition('/') +        int_info['network_id'] = network_id +        facts['network']['interfaces'].append(int_info) +    _, _, zone = metadata['instance']['zone'].rpartition('/') +    facts['zone'] = zone +    facts['external_id'] = metadata['instance']['id'] + +    # Default to no sdn for GCE deployments +    facts['use_openshift_sdn'] = False + +    # GCE currently only supports a single interface +    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0] +    pub_ip = facts['network']['interfaces'][0]['public_ips'][0] +    facts['network']['public_ip'] = pub_ip +    facts['network']['hostname'] = metadata['instance']['hostname'] + +    # TODO: attempt to resolve public_hostname +    facts['network']['public_hostname'] = facts['network']['public_ip'] + +    return facts + + +def normalize_aws_facts(metadata, facts): +    """ Normalize aws facts + +        Args: +            metadata (dict): provider metadata +            facts (dict): facts to update +        Returns: +            dict: the result of adding the normalized metadata to the provided +                  facts dict +    """ +    for interface in sorted( +            metadata['network']['interfaces']['macs'].values(), +            key=lambda x: x['device-number'] +    ): +        int_info = dict() +        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} +        for ips_var, int_var in var_map.iteritems(): +            ips = interface.get(int_var) +            if isinstance(ips, 
basestring): +                int_info[ips_var] = [ips]              else: -                current_config['roles'] = [role] +                int_info[ips_var] = ips +        if 'vpc-id' in interface: +            int_info['network_type'] = 'vpc' +        else: +            int_info['network_type'] = 'classic' +        if int_info['network_type'] == 'vpc': +            int_info['network_id'] = interface['subnet-id'] +        else: +            int_info['network_id'] = None +        facts['network']['interfaces'].append(int_info) +    facts['zone'] = metadata['placement']['availability-zone'] +    facts['external_id'] = metadata['instance-id'] + +    # TODO: actually attempt to determine default local and public ips +    # by using the ansible default ip fact and the ipv4-associations +    # from the ec2 metadata +    facts['network']['ip'] = metadata.get('local-ipv4') +    facts['network']['public_ip'] = metadata.get('public-ipv4') + +    # TODO: verify that local hostname makes sense and is resolvable +    facts['network']['hostname'] = metadata.get('local-hostname') + +    # TODO: verify that public hostname makes sense and is resolvable +    facts['network']['public_hostname'] = metadata.get('public-hostname') + +    return facts + + +def normalize_openstack_facts(metadata, facts): +    """ Normalize openstack facts + +        Args: +            metadata (dict): provider metadata +            facts (dict): facts to update +        Returns: +            dict: the result of adding the normalized metadata to the provided +                  facts dict +    """ +    # openstack ec2 compat api does not support network interfaces and +    # the version tested on did not include the info in the openstack +    # metadata api, should be updated if neutron exposes this. 
+ +    facts['zone'] = metadata['availability_zone'] +    facts['external_id'] = metadata['uuid'] +    facts['network']['ip'] = metadata['ec2_compat']['local-ipv4'] +    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4'] + +    # TODO: verify local hostname makes sense and is resolvable +    facts['network']['hostname'] = metadata['hostname'] + +    # TODO: verify that public hostname makes sense and is resolvable +    pub_h = metadata['ec2_compat']['public-hostname'] +    facts['network']['public_hostname'] = pub_h + +    return facts + + +def normalize_provider_facts(provider, metadata): +    """ Normalize provider facts + +        Args: +            provider (str): host provider +            metadata (dict): provider metadata +        Returns: +            dict: the normalized provider facts +    """ +    if provider is None or metadata is None: +        return {} + +    # TODO: test for ipv6_enabled where possible (gce, aws do not support) +    # and configure ipv6 facts if available + +    # TODO: add support for setting user_data if available + +    facts = dict(name=provider, metadata=metadata, +                 network=dict(interfaces=[], ipv6_enabled=False)) +    if provider == 'gce': +        facts = normalize_gce_facts(metadata, facts) +    elif provider == 'ec2': +        facts = normalize_aws_facts(metadata, facts) +    elif provider == 'openstack': +        facts = normalize_openstack_facts(metadata, facts) +    return facts + + +def set_url_facts_if_unset(facts): +    """ Set url facts if not already present in facts dict + +        Args: +            facts (dict): existing facts +        Returns: +            dict: the facts dict updated with the generated url facts if they +                  were not already present +    """ +    if 'master' in facts: +        for (url_var, use_ssl, port, default) in [ +                ('api_url', +                 facts['master']['api_use_ssl'], +                 facts['master']['api_port'], +   
              facts['common']['hostname']), +                ('public_api_url', +                 facts['master']['api_use_ssl'], +                 facts['master']['api_port'], +                 facts['common']['public_hostname']), +                ('console_url', +                 facts['master']['console_use_ssl'], +                 facts['master']['console_port'], +                 facts['common']['hostname']), +                ('public_console_url' 'console_use_ssl', +                 facts['master']['console_use_ssl'], +                 facts['master']['console_port'], +                 facts['common']['public_hostname'])]: +            if url_var not in facts['master']: +                scheme = 'https' if use_ssl else 'http' +                netloc = default +                if ((scheme == 'https' and port != '443') +                        or (scheme == 'http' and port != '80')): +                    netloc = "%s:%s" % (netloc, port) +                facts['master'][url_var] = urlparse.urlunparse( +                    (scheme, netloc, '', '', '', '') +                ) +    return facts + + +def get_current_config(facts): +    """ Get current openshift config + +        Args: +            facts (dict): existing facts +        Returns: +            dict: the facts dict updated with the current openshift config +    """ +    current_config = dict() +    roles = [role for role in facts if role not in ['common', 'provider']] +    for role in roles: +        if 'roles' in current_config: +            current_config['roles'].append(role) +        else: +            current_config['roles'] = [role] -            # TODO: parse the /etc/sysconfig/openshift-{master,node} config to -            # determine the location of files. +        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to +        # determine the location of files. 
-            # Query kubeconfig settings -            kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' -            if role == 'node': -                kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname']) +        # Query kubeconfig settings +        kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' +        if role == 'node': +            kubeconfig_dir = os.path.join( +                kubeconfig_dir, "node-%s" % facts['common']['hostname'] +            ) -            kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') -            if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path): +        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') +        if (os.path.isfile('/usr/bin/openshift') +                and os.path.isfile(kubeconfig_path)): +            try: +                _, output, _ = module.run_command( +                    ["/usr/bin/openshift", "ex", "config", "view", "-o", +                     "json", "--kubeconfig=%s" % kubeconfig_path], +                    check_rc=False +                ) +                config = json.loads(output) + +                cad = 'certificate-authority-data' +                try: +                    for cluster in config['clusters']: +                        config['clusters'][cluster][cad] = 'masked' +                except KeyError: +                    pass                  try: -                    _, output, error = module.run_command(["/usr/bin/openshift", "ex", -                                                           "config", "view", "-o", -                                                           "json", -                                                           "--kubeconfig=%s" % kubeconfig_path], -                                                           check_rc=False) -                    config = json.loads(output) - -                    try: -                        for cluster in 
config['clusters']: -                            config['clusters'][cluster]['certificate-authority-data'] = 'masked' -                    except KeyError: -                        pass -                    try: -                        for user in config['users']: -                            config['users'][user]['client-certificate-data'] = 'masked' -                            config['users'][user]['client-key-data'] = 'masked' -                    except KeyError: -                        pass - -                    current_config['kubeconfig'] = config -                except Exception: +                    for user in config['users']: +                        config['users'][user][cad] = 'masked' +                        config['users'][user]['client-key-data'] = 'masked' +                except KeyError:                      pass -        return current_config +                current_config['kubeconfig'] = config +            # override pylint broad-except warning, since we do not want +            # to bubble up any exceptions if openshift ex config view +            # fails +            # pylint: disable=broad-except +            except Exception: +                pass -    def apply_provider_facts(self, facts, provider_facts, roles): -        if not provider_facts: -            return facts +    return current_config -        use_openshift_sdn = provider_facts.get('use_openshift_sdn') -        if isinstance(use_openshift_sdn, bool): -            facts['common']['use_openshift_sdn'] = use_openshift_sdn -        common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] -        for h_var, ip_var in common_vars: -            ip_value = provider_facts['network'].get(ip_var) -            if ip_value: -                facts['common'][ip_var] = ip_value +def apply_provider_facts(facts, provider_facts, roles): +    """ Apply provider facts to supplied facts dict -            facts['common'][h_var] = 
self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var]) +        Args: +            facts (dict): facts dict to update +            provider_facts (dict): provider facts to apply +            roles: host roles +        Returns: +            dict: the merged facts +    """ +    if not provider_facts: +        return facts -        if 'node' in roles: -            ext_id = provider_facts.get('external_id') -            if ext_id: -                facts['node']['external_id'] = ext_id +    use_openshift_sdn = provider_facts.get('use_openshift_sdn') +    if isinstance(use_openshift_sdn, bool): +        facts['common']['use_openshift_sdn'] = use_openshift_sdn -        facts['provider'] = provider_facts -        return facts +    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] +    for h_var, ip_var in common_vars: +        ip_value = provider_facts['network'].get(ip_var) +        if ip_value: +            facts['common'][ip_var] = ip_value -    def hostname_valid(self, hostname): -        if (not hostname or -                hostname.startswith('localhost') or -                hostname.endswith('localdomain') or -                len(hostname.split('.')) < 2): -            return False +        facts['common'][h_var] = choose_hostname( +            [provider_facts['network'].get(h_var)], +            facts['common'][ip_var] +        ) -        return True +    if 'node' in roles: +        ext_id = provider_facts.get('external_id') +        if ext_id: +            facts['node']['external_id'] = ext_id + +    facts['provider'] = provider_facts +    return facts + + +def merge_facts(orig, new): +    """ Recursively merge facts dicts + +        Args: +            orig (dict): existing facts +            new (dict): facts to update +        Returns: +            dict: the merged facts +    """ +    facts = dict() +    for key, value in orig.iteritems(): +        if key in new: +            if isinstance(value, dict): +        
        facts[key] = merge_facts(value, new[key]) +            else: +                facts[key] = copy.copy(new[key]) +        else: +            facts[key] = copy.deepcopy(value) +    new_keys = set(new.keys()) - set(orig.keys()) +    for key in new_keys: +        facts[key] = copy.deepcopy(new[key]) +    return facts + + +def save_local_facts(filename, facts): +    """ Save local facts + +        Args: +            filename (str): local facts file +            facts (dict): facts to set +    """ +    try: +        fact_dir = os.path.dirname(filename) +        if not os.path.exists(fact_dir): +            os.makedirs(fact_dir) +        with open(filename, 'w') as fact_file: +            fact_file.write(module.jsonify(facts)) +    except (IOError, OSError) as ex: +        raise OpenShiftFactsFileWriteError( +            "Could not create fact file: %s, error: %s" % (filename, ex) +        ) -    def choose_hostname(self, hostnames=[], fallback=''): -        hostname = fallback -        ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ] -        hosts = [ i for i in hostnames if i is not None and i not in set(ips) ] +def get_local_facts_from_file(filename): +    """ Retrieve local facts from fact file + +        Args: +            filename (str): local facts file +        Returns: +            dict: the retrieved facts +    """ +    local_facts = dict() +    try: +        # Handle conversion of INI style facts file to json style +        ini_facts = ConfigParser.SafeConfigParser() +        ini_facts.read(filename) +        for section in ini_facts.sections(): +            local_facts[section] = dict() +            for key, value in ini_facts.items(section): +                local_facts[section][key] = value + +    except (ConfigParser.MissingSectionHeaderError, +            ConfigParser.ParsingError): +        try: +            with open(filename, 'r') as facts_file: +                local_facts = 
json.load(facts_file) +        except (ValueError, IOError): +            pass -        for host_list in (hosts, ips): -            for h in host_list: -                if self.hostname_valid(h): -                    return h +    return local_facts -        return hostname + +class OpenShiftFactsUnsupportedRoleError(Exception): +    """OpenShift Facts Unsupported Role Error""" +    pass + + +class OpenShiftFactsFileWriteError(Exception): +    """OpenShift Facts File Write Error""" +    pass + + +class OpenShiftFactsMetadataUnavailableError(Exception): +    """OpenShift Facts Metadata Unavailable Error""" +    pass + + +class OpenShiftFacts(object): +    """ OpenShift Facts + +        Attributes: +            facts (dict): OpenShift facts for the host + +        Args: +            role (str): role for setting local facts +            filename (str): local facts file to use +            local_facts (dict): local facts to set + +        Raises: +            OpenShiftFactsUnsupportedRoleError: +    """ +    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns'] + +    def __init__(self, role, filename, local_facts): +        self.changed = False +        self.filename = filename +        if role not in self.known_roles: +            raise OpenShiftFactsUnsupportedRoleError( +                "Role %s is not supported by this module" % role +            ) +        self.role = role +        self.system_facts = ansible_facts(module) +        self.facts = self.generate_facts(local_facts) + +    def generate_facts(self, local_facts): +        """ Generate facts + +            Args: +                local_facts (dict): local_facts for overriding generated +                                    defaults + +            Returns: +                dict: The generated facts +        """ +        local_facts = self.init_local_facts(local_facts) +        roles = local_facts.keys() + +        defaults = self.get_defaults(roles) +        provider_facts = 
self.init_provider_facts() +        facts = apply_provider_facts(defaults, provider_facts, roles) +        facts = merge_facts(facts, local_facts) +        facts['current_config'] = get_current_config(facts) +        facts = set_url_facts_if_unset(facts) +        return dict(openshift=facts)      def get_defaults(self, roles): -        ansible_facts = self.get_ansible_facts() +        """ Get default fact values +            Args: +                roles (list): list of roles for this host + +            Returns: +                dict: The generated default facts +        """          defaults = dict()          common = dict(use_openshift_sdn=True) -        ip = ansible_facts['default_ipv4']['address'] -        common['ip'] = ip -        common['public_ip'] = ip +        ip_addr = self.system_facts['default_ipv4']['address'] +        common['ip'] = ip_addr +        common['public_ip'] = ip_addr -        rc, output, error = module.run_command(['hostname', '-f']) -        hostname_f = output.strip() if rc == 0 else '' -        hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']] -        hostname = self.choose_hostname(hostname_values) +        exit_code, output, _ = module.run_command(['hostname', '-f']) +        hostname_f = output.strip() if exit_code == 0 else '' +        hostname_values = [hostname_f, self.system_facts['nodename'], +                           self.system_facts['fqdn']] +        hostname = choose_hostname(hostname_values)          common['hostname'] = hostname          common['public_hostname'] = hostname          defaults['common'] = common          if 'master' in roles: -            # TODO: provide for a better way to override just the port, or just -            # the urls, instead of forcing both, also to override the hostname -            # without having to re-generate these urls later              master = dict(api_use_ssl=True, api_port='8443', -                    console_use_ssl=True, console_path='/console', - 
                   console_port='8443', etcd_use_ssl=False, -                    etcd_port='4001', portal_net='172.30.17.0/24') +                          console_use_ssl=True, console_path='/console', +                          console_port='8443', etcd_use_ssl=False, +                          etcd_port='4001', portal_net='172.30.17.0/24')              defaults['master'] = master          if 'node' in roles:              node = dict(external_id=common['hostname'], pod_cidr='',                          labels={}, annotations={}) -            node['resources_cpu'] = ansible_facts['processor_cores'] -            node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75) +            node['resources_cpu'] = self.system_facts['processor_cores'] +            node['resources_memory'] = int( +                int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75 +            )              defaults['node'] = node          return defaults -    def merge_facts(self, orig, new): -        facts = dict() -        for key, value in orig.iteritems(): -            if key in new: -                if isinstance(value, dict): -                    facts[key] = self.merge_facts(value, new[key]) -                else: -                    facts[key] = copy.copy(new[key]) -            else: -                facts[key] = copy.deepcopy(value) -        new_keys = set(new.keys()) - set(orig.keys()) -        for key in new_keys: -            facts[key] = copy.deepcopy(new[key]) -        return facts - -    def query_metadata(self, metadata_url, headers=None, expect_json=False): -        r, info = fetch_url(module, metadata_url, headers=headers) -        if info['status'] != 200: -            raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") -        if expect_json: -            return module.from_json(r.read()) -        else: -            return [line.strip() for line in r.readlines()] - -    def walk_metadata(self, metadata_url, 
headers=None, expect_json=False): -        metadata = dict() - -        for line in self.query_metadata(metadata_url, headers, expect_json): -            if line.endswith('/') and not line == 'public-keys/': -                key = line[:-1] -                metadata[key]=self.walk_metadata(metadata_url + line, headers, -                                                 expect_json) -            else: -                results = self.query_metadata(metadata_url + line, headers, -                                              expect_json) -                if len(results) == 1: -                    metadata[line] = results.pop() -                else: -                    metadata[line] = results -        return metadata - -    def get_provider_metadata(self, metadata_url, supports_recursive=False, -                          headers=None, expect_json=False): -        try: -            if supports_recursive: -                metadata = self.query_metadata(metadata_url, headers, expect_json) -            else: -                metadata = self.walk_metadata(metadata_url, headers, expect_json) -        except OpenShiftFactsMetadataUnavailableError as e: -            metadata = None -        return metadata - -    def get_ansible_facts(self): -        if not hasattr(self, 'ansible_facts'): -            self.ansible_facts = ansible_facts(module) -        return self.ansible_facts -      def guess_host_provider(self): +        """ Guess the host provider + +            Returns: +                dict: The generated default facts for the detected provider +        """          # TODO: cloud provider facts should probably be submitted upstream -        ansible_facts = self.get_ansible_facts() -        product_name = ansible_facts['product_name'] -        product_version = ansible_facts['product_version'] -        virt_type = ansible_facts['virtualization_type'] -        virt_role = ansible_facts['virtualization_role'] +        product_name = self.system_facts['product_name'] +     
   product_version = self.system_facts['product_version'] +        virt_type = self.system_facts['virtualization_type'] +        virt_role = self.system_facts['virtualization_role']          provider = None          metadata = None          # TODO: this is not exposed through module_utils/facts.py in ansible,          # need to create PR for ansible to expose it -        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor') +        bios_vendor = get_file_content( +            '/sys/devices/virtual/dmi/id/bios_vendor' +        )          if bios_vendor == 'Google':              provider = 'gce' -            metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true' +            metadata_url = ('http://metadata.google.internal/' +                            'computeMetadata/v1/?recursive=true')              headers = {'Metadata-Flavor': 'Google'} -            metadata = self.get_provider_metadata(metadata_url, True, headers, -                                                  True) +            metadata = get_provider_metadata(metadata_url, True, headers, +                                             True)              # Filter sshKeys and serviceAccounts from gce metadata              if metadata:                  metadata['project']['attributes'].pop('sshKeys', None)                  metadata['instance'].pop('serviceAccounts', None) -        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): +        elif (virt_type == 'xen' and virt_role == 'guest' +              and re.match(r'.*\.amazon$', product_version)):              provider = 'ec2'              metadata_url = 'http://169.254.169.254/latest/meta-data/' -            metadata = self.get_provider_metadata(metadata_url) +            metadata = get_provider_metadata(metadata_url)          elif re.search(r'OpenStack', product_name):              provider = 'openstack' -            metadata_url = 
'http://169.254.169.254/openstack/latest/meta_data.json' -            metadata = self.get_provider_metadata(metadata_url, True, None, True) +            metadata_url = ('http://169.254.169.254/openstack/latest/' +                            'meta_data.json') +            metadata = get_provider_metadata(metadata_url, True, None, +                                             True)              if metadata:                  ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' -                metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) - +                metadata['ec2_compat'] = get_provider_metadata( +                    ec2_compat_url +                ) + +                # disable pylint maybe-no-member because overloaded use of +                # the module name causes pylint to not detect that results +                # is an array or hash +                # pylint: disable=maybe-no-member                  # Filter public_keys  and random_seed from openstack metadata                  metadata.pop('public_keys', None)                  metadata.pop('random_seed', None) @@ -312,146 +668,74 @@ class OpenShiftFacts():          return dict(name=provider, metadata=metadata) -    def normalize_provider_facts(self, provider, metadata): -        if provider is None or metadata is None: -            return {} - -        # TODO: test for ipv6_enabled where possible (gce, aws do not support) -        # and configure ipv6 facts if available - -        # TODO: add support for setting user_data if available - -        facts = dict(name=provider, metadata=metadata) -        network = dict(interfaces=[], ipv6_enabled=False) -        if provider == 'gce': -            for interface in metadata['instance']['networkInterfaces']: -                int_info = dict(ips=[interface['ip']], network_type=provider) -                int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ] -                
int_info['public_ips'].extend(interface['forwardedIps']) -                _, _, network_id = interface['network'].rpartition('/') -                int_info['network_id'] = network_id -                network['interfaces'].append(int_info) -            _, _, zone = metadata['instance']['zone'].rpartition('/') -            facts['zone'] = zone -            facts['external_id'] = metadata['instance']['id'] - -            # Default to no sdn for GCE deployments -            facts['use_openshift_sdn'] = False - -            # GCE currently only supports a single interface -            network['ip'] = network['interfaces'][0]['ips'][0] -            network['public_ip'] = network['interfaces'][0]['public_ips'][0] -            network['hostname'] = metadata['instance']['hostname'] - -            # TODO: attempt to resolve public_hostname -            network['public_hostname'] = network['public_ip'] -        elif provider == 'ec2': -            for interface in sorted(metadata['network']['interfaces']['macs'].values(), -                                    key=lambda x: x['device-number']): -                int_info = dict() -                var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} -                for ips_var, int_var in var_map.iteritems(): -                    ips = interface[int_var] -                    int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips -                int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic' -                int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None -                network['interfaces'].append(int_info) -            facts['zone'] = metadata['placement']['availability-zone'] -            facts['external_id'] = metadata['instance-id'] - -            # TODO: actually attempt to determine default local and public ips -            # by using the ansible default ip fact and the ipv4-associations -            # form the ec2 metadata -   
         network['ip'] = metadata['local-ipv4'] -            network['public_ip'] = metadata['public-ipv4'] - -            # TODO: verify that local hostname makes sense and is resolvable -            network['hostname'] = metadata['local-hostname'] - -            # TODO: verify that public hostname makes sense and is resolvable -            network['public_hostname'] = metadata['public-hostname'] -        elif provider == 'openstack': -            # openstack ec2 compat api does not support network interfaces and -            # the version tested on did not include the info in the openstack -            # metadata api, should be updated if neutron exposes this. - -            facts['zone'] = metadata['availability_zone'] -            facts['external_id'] = metadata['uuid'] -            network['ip'] = metadata['ec2_compat']['local-ipv4'] -            network['public_ip'] = metadata['ec2_compat']['public-ipv4'] - -            # TODO: verify local hostname makes sense and is resolvable -            network['hostname'] = metadata['hostname'] - -            # TODO: verify that public hostname makes sense and is resolvable -            network['public_hostname'] = metadata['ec2_compat']['public-hostname'] - -        facts['network'] = network -        return facts -      def init_provider_facts(self): +        """ Initialize the provider facts + +            Returns: +                dict: The normalized provider facts +        """          provider_info = self.guess_host_provider() -        provider_facts = self.normalize_provider_facts( -                provider_info.get('name'), -                provider_info.get('metadata') +        provider_facts = normalize_provider_facts( +            provider_info.get('name'), +            provider_info.get('metadata')          )          return provider_facts -    def get_facts(self): -        # TODO: transform facts into cleaner format (openshift_<blah> instead -        # of openshift.<blah> -        return self.facts - -    
def init_local_facts(self, facts={}): -        changed = False +    def init_local_facts(self, facts=None): +        """ Initialize the provider facts -        local_facts = ConfigParser.SafeConfigParser() -        local_facts.read(self.filename) +            Args: +                facts (dict): local facts to set -        section = self.role -        if not local_facts.has_section(section): -            local_facts.add_section(section) +            Returns: +                dict: The result of merging the provided facts with existing +                      local facts +        """ +        changed = False +        facts_to_set = {self.role: dict()} +        if facts is not None: +            facts_to_set[self.role] = facts + +        local_facts = get_local_facts_from_file(self.filename) + +        for arg in ['labels', 'annotations']: +            if arg in facts_to_set and isinstance(facts_to_set[arg], +                                                  basestring): +                facts_to_set[arg] = module.from_json(facts_to_set[arg]) + +        new_local_facts = merge_facts(local_facts, facts_to_set) +        for facts in new_local_facts.values(): +            keys_to_delete = [] +            for fact, value in facts.iteritems(): +                if value == "" or value is None: +                    keys_to_delete.append(fact) +            for key in keys_to_delete: +                del facts[key] + +        if new_local_facts != local_facts:              changed = True -        for key, value in facts.iteritems(): -            if isinstance(value, bool): -                value = str(value) -            if not value: -                continue -            if not local_facts.has_option(section, key) or local_facts.get(section, key) != value: -                local_facts.set(section, key, value) -                changed = True +            if not module.check_mode: +                save_local_facts(self.filename, new_local_facts) -        if changed and not 
module.check_mode: -            try: -                fact_dir = os.path.dirname(self.filename) -                if not os.path.exists(fact_dir): -                    os.makedirs(fact_dir) -                with open(self.filename, 'w') as fact_file: -                        local_facts.write(fact_file) -            except (IOError, OSError) as e: -                raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))          self.changed = changed - -        role_facts = dict() -        for section in local_facts.sections(): -            role_facts[section] = dict() -            for opt, val in local_facts.items(section): -                role_facts[section][opt] = val -        return role_facts +        return new_local_facts  def main(): +    """ main """ +    # disabling pylint errors for global-variable-undefined and invalid-name +    # for 'global module' usage, since it is required to use ansible_facts +    # pylint: disable=global-variable-undefined, invalid-name      global module      module = AnsibleModule( -            argument_spec = dict( -                    role=dict(default='common', -                              choices=OpenShiftFacts.known_roles, -                              required=False), -                    local_facts=dict(default={}, type='dict', required=False), -            ), -            supports_check_mode=True, -            add_file_common_args=True, +        argument_spec=dict( +            role=dict(default='common', required=False, +                      choices=OpenShiftFacts.known_roles), +            local_facts=dict(default=None, type='dict', required=False), +        ), +        supports_check_mode=True, +        add_file_common_args=True,      )      role = module.params['role'] @@ -464,11 +748,13 @@ def main():      file_params['path'] = fact_file      file_args = module.load_file_common_arguments(file_params)      changed = module.set_fs_attributes_if_different(file_args, -  
          openshift_facts.changed) +                                                    openshift_facts.changed)      return module.exit_json(changed=changed, -            ansible_facts=openshift_facts.get_facts()) +                            ansible_facts=openshift_facts.facts) +# ignore pylint errors related to the module_utils import +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import  # import module snippets  from ansible.module_utils.basic import *  from ansible.module_utils.facts import * diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 5a7d10d25..d71e6d019 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -1,3 +1,9 @@  --- +- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 +  assert: +    that: +    - ansible_version | version_compare('1.8.0', 'ge') +    - ansible_version | version_compare('1.9.0', 'ne') +  - name: Gather OpenShift facts    openshift_facts: diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 87fb347a8..56cf43531 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -2,12 +2,15 @@  openshift_node_ips: []  # TODO: update setting these values based on the facts -# TODO: update for console port change  os_firewall_allow:  - service: etcd embedded    port: 4001/tcp  - service: OpenShift api https    port: 8443/tcp +- service: OpenShift dns tcp +  port: 53/tcp +- service: OpenShift dns udp +  port: 53/udp  os_firewall_deny:  - service: OpenShift api http    port: 8080/tcp | 
