From 5f09b0e1d3b27fb81473bfd92d424358505969e5 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Fri, 1 May 2015 11:29:40 -0400
Subject: openshift_facts and misc fixes

- Do not attempt to fetch file to same file location when playbooks are run
  locally on master

- Fix for openshift_facts when run against a host in a VPC that does not
  assign internal/external hostnames or IPs

- Fix setting of labels and annotations on node instances and in
  openshift_facts
  - converted openshift_facts to use json for local_fact storage instead of
    an ini file, included code that should migrate existing ini users to json
  - added region/zone setting to byo inventory

- Fix fact related bug where deployment_type was being set on node role
  instead of common role for node hosts
---
 inventory/byo/hosts                              |   6 +-
 playbooks/common/openshift-node/config.yml       |   5 +-
 roles/openshift_facts/library/openshift_facts.py | 337 ++++++++++++++---------
 3 files changed, 217 insertions(+), 131 deletions(-)

diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 98dbb4fd8..728eec8aa 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -20,7 +20,8 @@ deployment_type=enterprise
 openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
 
 # Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
 
 # Origin copr repo
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -31,4 +32,5 @@ ose3-master-ansible.test.example.com
 
 # host group for nodes
 [nodes]
-ose3-node[1:2]-ansible.test.example.com
+ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 433cfeb87..96641a274 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -15,6 +15,7 @@
         local_facts:
           hostname: "{{ openshift_hostname | default(None) }}"
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
       - role: node
         local_facts:
           external_id: "{{ openshift_node_external_id | default(None) }}"
@@ -23,7 +24,6 @@
           pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
           labels: "{{ openshift_node_labels | default(None) }}"
           annotations: "{{ openshift_node_annotations | default(None) }}"
-          deployment_type: "{{ openshift_deployment_type }}"
 
 
 - name: Create temp directory for syncing certs
@@ -68,7 +68,6 @@
     fetch:
       src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
       dest: "{{ sync_tmpdir }}/"
-      flat: yes
       fail_on_missing: yes
       validate_checksum: yes
     with_items: openshift_nodes
@@ -79,7 +78,7 @@
   hosts: oo_nodes_to_config
   gather_facts: no
   vars:
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}/{{ groups['oo_first_master'][0] }}/{{ hostvars.localhost.mktemp.stdout }}"
     openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
   pre_tasks:
   - name: Ensure certificate directory exists
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 1e0d5c605..bb40a9569 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,6 +1,15 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vim: expandtab:tabstop=4:shiftwidth=4
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+#   fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+    #   no-self-use
+    #   too-many-locals
+    #   too-many-branches
+    # pylint:disable=fixme, no-self-use
+    # pylint:disable=too-many-locals, too-many-branches
 
 DOCUMENTATION = '''
 ---
@@ -24,15 +33,18 @@ class OpenShiftFactsFileWriteError(Exception):
 class OpenShiftFactsMetadataUnavailableError(Exception):
     pass
 
-class OpenShiftFacts():
+class OpenShiftFacts(object):
     known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
 
     def __init__(self, role, filename, local_facts):
         self.changed = False
         self.filename = filename
         if role not in self.known_roles:
-            raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role)
+            raise OpenShiftFactsUnsupportedRoleError(
+                "Role %s is not supported by this module" % role
+            )
         self.role = role
+        self.system_facts = ansible_facts(module)
         self.facts = self.generate_facts(local_facts)
 
     def generate_facts(self, local_facts):
@@ -42,7 +54,6 @@ class OpenShiftFacts():
         defaults = self.get_defaults(roles)
         provider_facts = self.init_provider_facts()
         facts = self.apply_provider_facts(defaults, provider_facts, roles)
-
         facts = self.merge_facts(facts, local_facts)
         facts['current_config'] = self.current_config(facts)
         self.set_url_facts_if_unset(facts)
@@ -53,35 +64,38 @@ class OpenShiftFacts():
         if 'master' in facts:
             for (url_var, use_ssl, port, default) in [
                     ('api_url',
-                        facts['master']['api_use_ssl'],
-                        facts['master']['api_port'],
-                        facts['common']['hostname']),
+                     facts['master']['api_use_ssl'],
+                     facts['master']['api_port'],
+                     facts['common']['hostname']),
                     ('public_api_url',
-                        facts['master']['api_use_ssl'],
-                        facts['master']['api_port'],
-                        facts['common']['public_hostname']),
+                     facts['master']['api_use_ssl'],
+                     facts['master']['api_port'],
+                     facts['common']['public_hostname']),
                     ('console_url',
-                        facts['master']['console_use_ssl'],
-                        facts['master']['console_port'],
-                        facts['common']['hostname']),
+                     facts['master']['console_use_ssl'],
+                     facts['master']['console_port'],
+                     facts['common']['hostname']),
                     ('public_console_url' 'console_use_ssl',
-                        facts['master']['console_use_ssl'],
-                        facts['master']['console_port'],
-                        facts['common']['public_hostname'])]:
+                     facts['master']['console_use_ssl'],
+                     facts['master']['console_port'],
+                     facts['common']['public_hostname'])]:
                 if url_var not in facts['master']:
                     scheme = 'https' if use_ssl else 'http'
                     netloc = default
-                    if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
+                    if ((scheme == 'https' and port != '443')
+                            or (scheme == 'http' and port != '80')):
                         netloc = "%s:%s" % (netloc, port)
-                    facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', ''))
+                    facts['master'][url_var] = urlparse.urlunparse(
+                        (scheme, netloc, '', '', '', '')
+                    )
 
 
     # Query current OpenShift config and return a dictionary containing
     # settings that may be valuable for determining actions that need to be
     # taken in the playbooks/roles
     def current_config(self, facts):
-        current_config=dict()
-        roles = [ role for role in facts if role not in ['common','provider'] ]
+        current_config = dict()
+        roles = [role for role in facts if role not in ['common', 'provider']]
         for role in roles:
             if 'roles' in current_config:
                 current_config['roles'].append(role)
@@ -94,31 +108,40 @@ class OpenShiftFacts():
             # Query kubeconfig settings
             kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
             if role == 'node':
-                kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname'])
+                kubeconfig_dir = os.path.join(
+                    kubeconfig_dir, "node-%s" % facts['common']['hostname']
+                )
 
             kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
-            if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
+            if (os.path.isfile('/usr/bin/openshift')
+                    and os.path.isfile(kubeconfig_path)):
                 try:
-                    _, output, error = module.run_command(["/usr/bin/openshift", "ex",
-                                                           "config", "view", "-o",
-                                                           "json",
-                                                           "--kubeconfig=%s" % kubeconfig_path],
-                                                           check_rc=False)
+                    _, output, _ = module.run_command(
+                        ["/usr/bin/openshift", "ex", "config", "view", "-o",
+                         "json", "--kubeconfig=%s" % kubeconfig_path],
+                        check_rc=False
+                    )
                     config = json.loads(output)
 
+                    cad = 'certificate-authority-data'
                     try:
                         for cluster in config['clusters']:
-                            config['clusters'][cluster]['certificate-authority-data'] = 'masked'
+                            config['clusters'][cluster][cad] = 'masked'
                     except KeyError:
                         pass
                     try:
                         for user in config['users']:
-                            config['users'][user]['client-certificate-data'] = 'masked'
+                            config['users'][user][cad] = 'masked'
                             config['users'][user]['client-key-data'] = 'masked'
                     except KeyError:
                         pass
 
                     current_config['kubeconfig'] = config
+
+                # override pylint broad-except warning, since we do not want
+                # to bubble up any exceptions if openshift ex config view
+                # fails
+                # pylint: disable=broad-except
                 except Exception:
                     pass
 
@@ -139,7 +162,10 @@ class OpenShiftFacts():
             if ip_value:
                 facts['common'][ip_var] = ip_value
 
-            facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var])
+            facts['common'][h_var] = self.choose_hostname(
+                [provider_facts['network'].get(h_var)],
+                facts['common'][ip_var]
+            )
 
         if 'node' in roles:
             ext_id = provider_facts.get('external_id')
@@ -158,32 +184,37 @@ class OpenShiftFacts():
 
         return True
 
-    def choose_hostname(self, hostnames=[], fallback=''):
+    def choose_hostname(self, hostnames=None, fallback=''):
         hostname = fallback
+        if hostnames is None:
+            return hostname
 
-        ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ]
-        hosts = [ i for i in hostnames if i is not None and i not in set(ips) ]
+        ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
+        ips = [i for i in hostnames
+               if (i is not None and isinstance(i, basestring)
+                   and re.match(ip_regex, i))]
+        hosts = [i for i in hostnames
+                 if i is not None and i != '' and i not in ips]
 
         for host_list in (hosts, ips):
-            for h in host_list:
-                if self.hostname_valid(h):
-                    return h
+            for host in host_list:
+                if self.hostname_valid(host):
+                    return host
 
         return hostname
 
     def get_defaults(self, roles):
-        ansible_facts = self.get_ansible_facts()
-
         defaults = dict()
 
         common = dict(use_openshift_sdn=True)
-        ip = ansible_facts['default_ipv4']['address']
-        common['ip'] = ip
-        common['public_ip'] = ip
-
-        rc, output, error = module.run_command(['hostname', '-f'])
-        hostname_f = output.strip() if rc == 0 else ''
-        hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
+        ip_addr = self.system_facts['default_ipv4']['address']
+        common['ip'] = ip_addr
+        common['public_ip'] = ip_addr
+
+        exit_code, output, _ = module.run_command(['hostname', '-f'])
+        hostname_f = output.strip() if exit_code == 0 else ''
+        hostname_values = [hostname_f, self.system_facts['nodename'],
+                           self.system_facts['fqdn']]
         hostname = self.choose_hostname(hostname_values)
 
         common['hostname'] = hostname
@@ -195,16 +226,18 @@ class OpenShiftFacts():
             # the urls, instead of forcing both, also to override the hostname
             # without having to re-generate these urls later
             master = dict(api_use_ssl=True, api_port='8443',
-                    console_use_ssl=True, console_path='/console',
-                    console_port='8443', etcd_use_ssl=False,
-                    etcd_port='4001', portal_net='172.30.17.0/24')
+                          console_use_ssl=True, console_path='/console',
+                          console_port='8443', etcd_use_ssl=False,
+                          etcd_port='4001', portal_net='172.30.17.0/24')
             defaults['master'] = master
 
         if 'node' in roles:
             node = dict(external_id=common['hostname'], pod_cidr='',
                         labels={}, annotations={})
-            node['resources_cpu'] = ansible_facts['processor_cores']
-            node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+            node['resources_cpu'] = self.system_facts['processor_cores']
+            node['resources_memory'] = int(
+                int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75
+            )
             defaults['node'] = node
 
         return defaults
@@ -225,13 +258,13 @@ class OpenShiftFacts():
         return facts
 
     def query_metadata(self, metadata_url, headers=None, expect_json=False):
-        r, info = fetch_url(module, metadata_url, headers=headers)
+        result, info = fetch_url(module, metadata_url, headers=headers)
         if info['status'] != 200:
             raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
         if expect_json:
-            return module.from_json(r.read())
+            return module.from_json(result.read())
         else:
-            return [line.strip() for line in r.readlines()]
+            return [line.strip() for line in result.readlines()]
 
     def walk_metadata(self, metadata_url, headers=None, expect_json=False):
         metadata = dict()
@@ -239,49 +272,56 @@ class OpenShiftFacts():
         for line in self.query_metadata(metadata_url, headers, expect_json):
             if line.endswith('/') and not line == 'public-keys/':
                 key = line[:-1]
-                metadata[key]=self.walk_metadata(metadata_url + line, headers,
-                                                 expect_json)
+                metadata[key] = self.walk_metadata(metadata_url + line,
+                                                   headers, expect_json)
             else:
                 results = self.query_metadata(metadata_url + line, headers,
                                               expect_json)
                 if len(results) == 1:
+                    # disable pylint maybe-no-member because overloaded use of
+                    # the module name causes pylint to not detect that results
+                    # is an array or hash
+                    # pylint: disable=maybe-no-member
                     metadata[line] = results.pop()
                 else:
                     metadata[line] = results
         return metadata
 
     def get_provider_metadata(self, metadata_url, supports_recursive=False,
-                          headers=None, expect_json=False):
+                              headers=None, expect_json=False):
         try:
             if supports_recursive:
-                metadata = self.query_metadata(metadata_url, headers, expect_json)
+                metadata = self.query_metadata(metadata_url, headers,
+                                               expect_json)
             else:
-                metadata = self.walk_metadata(metadata_url, headers, expect_json)
-        except OpenShiftFactsMetadataUnavailableError as e:
+                metadata = self.walk_metadata(metadata_url, headers,
+                                              expect_json)
+        except OpenShiftFactsMetadataUnavailableError:
             metadata = None
         return metadata
 
-    def get_ansible_facts(self):
-        if not hasattr(self, 'ansible_facts'):
-            self.ansible_facts = ansible_facts(module)
-        return self.ansible_facts
-
+    # TODO: refactor to reduce the size of this method, potentially create
+    # sub-methods (or classes for the different providers)
+    # temporarily disable pylint too-many-statements
+    # pylint: disable=too-many-statements
     def guess_host_provider(self):
         # TODO: cloud provider facts should probably be submitted upstream
-        ansible_facts = self.get_ansible_facts()
-        product_name = ansible_facts['product_name']
-        product_version = ansible_facts['product_version']
-        virt_type = ansible_facts['virtualization_type']
-        virt_role = ansible_facts['virtualization_role']
+        product_name = self.system_facts['product_name']
+        product_version = self.system_facts['product_version']
+        virt_type = self.system_facts['virtualization_type']
+        virt_role = self.system_facts['virtualization_role']
         provider = None
         metadata = None
 
         # TODO: this is not exposed through module_utils/facts.py in ansible,
         # need to create PR for ansible to expose it
-        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+        bios_vendor = get_file_content(
+            '/sys/devices/virtual/dmi/id/bios_vendor'
+        )
         if bios_vendor == 'Google':
             provider = 'gce'
-            metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+            metadata_url = ('http://metadata.google.internal/'
+                            'computeMetadata/v1/?recursive=true')
             headers = {'Metadata-Flavor': 'Google'}
             metadata = self.get_provider_metadata(metadata_url, True, headers,
                                                   True)
@@ -290,19 +330,28 @@ class OpenShiftFacts():
             if metadata:
                 metadata['project']['attributes'].pop('sshKeys', None)
                 metadata['instance'].pop('serviceAccounts', None)
-        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+        elif (virt_type == 'xen' and virt_role == 'guest'
+              and re.match(r'.*\.amazon$', product_version)):
             provider = 'ec2'
             metadata_url = 'http://169.254.169.254/latest/meta-data/'
             metadata = self.get_provider_metadata(metadata_url)
         elif re.search(r'OpenStack', product_name):
             provider = 'openstack'
-            metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
-            metadata = self.get_provider_metadata(metadata_url, True, None, True)
+            metadata_url = ('http://169.254.169.254/openstack/latest/'
+                            'meta_data.json')
+            metadata = self.get_provider_metadata(metadata_url, True, None,
+                                                  True)
 
             if metadata:
                 ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
-                metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
-
+                metadata['ec2_compat'] = self.get_provider_metadata(
+                    ec2_compat_url
+                )
+
+                # disable pylint maybe-no-member because overloaded use of
+                # the module name causes pylint to not detect that results
+                # is an array or hash
+                # pylint: disable=maybe-no-member
                 # Filter public_keys  and random_seed from openstack metadata
                 metadata.pop('public_keys', None)
                 metadata.pop('random_seed', None)
@@ -326,7 +375,8 @@ class OpenShiftFacts():
         if provider == 'gce':
             for interface in metadata['instance']['networkInterfaces']:
                 int_info = dict(ips=[interface['ip']], network_type=provider)
-                int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
+                int_info['public_ips'] = [ac['externalIp'] for ac
+                                          in interface['accessConfigs']]
                 int_info['public_ips'].extend(interface['forwardedIps'])
                 _, _, network_id = interface['network'].rpartition('/')
                 int_info['network_id'] = network_id
@@ -346,15 +396,26 @@ class OpenShiftFacts():
             # TODO: attempt to resolve public_hostname
             network['public_hostname'] = network['public_ip']
         elif provider == 'ec2':
-            for interface in sorted(metadata['network']['interfaces']['macs'].values(),
-                                    key=lambda x: x['device-number']):
+            for interface in sorted(
+                    metadata['network']['interfaces']['macs'].values(),
+                    key=lambda x: x['device-number']
+            ):
                 int_info = dict()
                 var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
                 for ips_var, int_var in var_map.iteritems():
                     ips = interface[int_var]
-                    int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
-                int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
-                int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
+                    if isinstance(ips, basestring):
+                        int_info[ips_var] = [ips]
+                    else:
+                        int_info[ips_var] = ips
+                if 'vpc-id' in interface:
+                    int_info['network_type'] = 'vpc'
+                else:
+                    int_info['network_type'] = 'classic'
+                if int_info['network_type'] == 'vpc':
+                    int_info['network_id'] = interface['subnet-id']
+                else:
+                    int_info['network_id'] = None
                 network['interfaces'].append(int_info)
             facts['zone'] = metadata['placement']['availability-zone']
             facts['external_id'] = metadata['instance-id']
@@ -384,7 +445,8 @@ class OpenShiftFacts():
             network['hostname'] = metadata['hostname']
 
             # TODO: verify that public hostname makes sense and is resolvable
-            network['public_hostname'] = metadata['ec2_compat']['public-hostname']
+            pub_h = metadata['ec2_compat']['public-hostname']
+            network['public_hostname'] = pub_h
 
         facts['network'] = network
         return facts
@@ -392,8 +454,8 @@ class OpenShiftFacts():
     def init_provider_facts(self):
         provider_info = self.guess_host_provider()
         provider_facts = self.normalize_provider_facts(
-                provider_info.get('name'),
-                provider_info.get('metadata')
+            provider_info.get('name'),
+            provider_info.get('metadata')
         )
         return provider_facts
 
@@ -402,56 +464,77 @@ class OpenShiftFacts():
         # of openshift.<blah>
         return self.facts
 
-    def init_local_facts(self, facts={}):
+    def init_local_facts(self, facts=None):
         changed = False
+        facts_to_set = {self.role: dict()}
+        if facts is not None:
+            facts_to_set[self.role] = facts
 
-        local_facts = ConfigParser.SafeConfigParser()
-        local_facts.read(self.filename)
-
-        section = self.role
-        if not local_facts.has_section(section):
-            local_facts.add_section(section)
+        # Handle conversion of INI style facts file to json style
+        local_facts = dict()
+        try:
+            ini_facts = ConfigParser.SafeConfigParser()
+            ini_facts.read(self.filename)
+            for section in ini_facts.sections():
+                local_facts[section] = dict()
+                for key, value in ini_facts.items(section):
+                    local_facts[section][key] = value
+
+        except (ConfigParser.MissingSectionHeaderError,
+                ConfigParser.ParsingError):
+            try:
+                with open(self.filename, 'r') as facts_file:
+                    local_facts = json.load(facts_file)
+
+            except (ValueError, IOError) as ex:
+                pass
+
+        for arg in ['labels', 'annotations']:
+            if arg in facts_to_set and isinstance(facts_to_set[arg],
+                                                  basestring):
+                facts_to_set[arg] = module.from_json(facts_to_set[arg])
+
+        new_local_facts = self.merge_facts(local_facts, facts_to_set)
+        for facts in new_local_facts.values():
+            keys_to_delete = []
+            for fact, value in facts.iteritems():
+                if value == "" or value is None:
+                    keys_to_delete.append(fact)
+            for key in keys_to_delete:
+                del facts[key]
+
+        if new_local_facts != local_facts:
             changed = True
 
-        for key, value in facts.iteritems():
-            if isinstance(value, bool):
-                value = str(value)
-            if not value:
-                continue
-            if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
-                local_facts.set(section, key, value)
-                changed = True
-
-        if changed and not module.check_mode:
-            try:
-                fact_dir = os.path.dirname(self.filename)
-                if not os.path.exists(fact_dir):
-                    os.makedirs(fact_dir)
-                with open(self.filename, 'w') as fact_file:
-                        local_facts.write(fact_file)
-            except (IOError, OSError) as e:
-                raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
+            if not module.check_mode:
+                try:
+                    fact_dir = os.path.dirname(self.filename)
+                    if not os.path.exists(fact_dir):
+                        os.makedirs(fact_dir)
+                    with open(self.filename, 'w') as fact_file:
+                        fact_file.write(module.jsonify(new_local_facts))
+                except (IOError, OSError) as ex:
+                    raise OpenShiftFactsFileWriteError(
+                        "Could not create fact file: "
+                        "%s, error: %s" % (self.filename, ex)
+                    )
         self.changed = changed
-
-        role_facts = dict()
-        for section in local_facts.sections():
-            role_facts[section] = dict()
-            for opt, val in local_facts.items(section):
-                role_facts[section][opt] = val
-        return role_facts
+        return new_local_facts
 
 
 def main():
+    # disabling pylint errors for global-variable-undefined and invalid-name
+    # for 'global module' usage, since it is required to use ansible_facts
+    # pylint: disable=global-variable-undefined, invalid-name
     global module
     module = AnsibleModule(
-            argument_spec = dict(
-                    role=dict(default='common',
-                              choices=OpenShiftFacts.known_roles,
-                              required=False),
-                    local_facts=dict(default={}, type='dict', required=False),
-            ),
-            supports_check_mode=True,
-            add_file_common_args=True,
+        argument_spec=dict(
+            role=dict(default='common', required=False,
+                      choices=OpenShiftFacts.known_roles),
+            local_facts=dict(default=None, type='dict', required=False),
+        ),
+        supports_check_mode=True,
+        add_file_common_args=True,
     )
 
     role = module.params['role']
@@ -464,11 +547,13 @@ def main():
     file_params['path'] = fact_file
     file_args = module.load_file_common_arguments(file_params)
     changed = module.set_fs_attributes_if_different(file_args,
-            openshift_facts.changed)
+                                                    openshift_facts.changed)
 
     return module.exit_json(changed=changed,
-            ansible_facts=openshift_facts.get_facts())
+                            ansible_facts=openshift_facts.get_facts())
 
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
 # import module snippets
 from ansible.module_utils.basic import *
 from ansible.module_utils.facts import *
-- 
cgit v1.2.3


From 1c42ff73d416e9f48ca37593e814faaff7e0f338 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Wed, 6 May 2015 13:42:40 -0400
Subject: pylint fixes

---
 roles/openshift_facts/library/openshift_facts.py | 905 ++++++++++++++---------
 1 file changed, 553 insertions(+), 352 deletions(-)

diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index bb40a9569..ec27b5697 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -4,12 +4,8 @@
 # disable pylint checks
 # temporarily disabled until items can be addressed:
 #   fixme - until all TODO comments have been addressed
-# permanently disabled unless someone wants to refactor the object model:
-    #   no-self-use
-    #   too-many-locals
-    #   too-many-branches
-    # pylint:disable=fixme, no-self-use
-    # pylint:disable=too-many-locals, too-many-branches
+# pylint:disable=fixme
+"""Ansible module for retrieving and setting openshift related facts"""
 
 DOCUMENTATION = '''
 ---
@@ -24,16 +20,514 @@ EXAMPLES = '''
 import ConfigParser
 import copy
 
+
+def hostname_valid(hostname):
+    """ Test if specified hostname should be considered valid
+
+        Args:
+            hostname (str): hostname to test
+        Returns:
+            bool: True if valid, otherwise False
+    """
+    if (not hostname or
+            hostname.startswith('localhost') or
+            hostname.endswith('localdomain') or
+            len(hostname.split('.')) < 2):
+        return False
+
+    return True
+
+
+def choose_hostname(hostnames=None, fallback=''):
+    """ Choose a hostname from the provided hostnames
+
+        Given a list of hostnames and a fallback value, choose a hostname to
+        use. This function will prefer fqdns if they exist (excluding any that
+        begin with localhost or end with localdomain) over ip addresses.
+
+        Args:
+            hostnames (list): list of hostnames
+            fallback (str): default value to set if hostnames does not contain
+                            a valid hostname
+        Returns:
+            str: chosen hostname
+    """
+    hostname = fallback
+    if hostnames is None:
+        return hostname
+
+    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
+    ips = [i for i in hostnames
+           if (i is not None and isinstance(i, basestring)
+               and re.match(ip_regex, i))]
+    hosts = [i for i in hostnames
+             if i is not None and i != '' and i not in ips]
+
+    for host_list in (hosts, ips):
+        for host in host_list:
+            if hostname_valid(host):
+                return host
+
+    return hostname
+
+
+def query_metadata(metadata_url, headers=None, expect_json=False):
+    """ Return metadata from the provided metadata_url
+
+        Args:
+            metadata_url (str): metadata url
+            headers (dict): headers to set for metadata request
+            expect_json (bool): does the metadata_url return json
+        Returns:
+            dict or list: metadata request result
+    """
+    result, info = fetch_url(module, metadata_url, headers=headers)
+    if info['status'] != 200:
+        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
+    if expect_json:
+        return module.from_json(result.read())
+    else:
+        return [line.strip() for line in result.readlines()]
+
+
+def walk_metadata(metadata_url, headers=None, expect_json=False):
+    """ Walk the metadata tree and return a dictionary of the entire tree
+
+        Args:
+            metadata_url (str): metadata url
+            headers (dict): headers to set for metadata request
+            expect_json (bool): does the metadata_url return json
+        Returns:
+            dict: the result of walking the metadata tree
+    """
+    metadata = dict()
+
+    for line in query_metadata(metadata_url, headers, expect_json):
+        if line.endswith('/') and not line == 'public-keys/':
+            key = line[:-1]
+            metadata[key] = walk_metadata(metadata_url + line,
+                                          headers, expect_json)
+        else:
+            results = query_metadata(metadata_url + line, headers,
+                                     expect_json)
+            if len(results) == 1:
+                # disable pylint maybe-no-member because overloaded use of
+                # the module name causes pylint to not detect that results
+                # is an array or hash
+                # pylint: disable=maybe-no-member
+                metadata[line] = results.pop()
+            else:
+                metadata[line] = results
+    return metadata
+
+
+def get_provider_metadata(metadata_url, supports_recursive=False,
+                          headers=None, expect_json=False):
+    """ Retrieve the provider metadata
+
+        Args:
+            metadata_url (str): metadata url
+            supports_recursive (bool): does the provider metadata api support
+                                       recursion
+            headers (dict): headers to set for metadata request
+            expect_json (bool): does the metadata_url return json
+        Returns:
+            dict: the provider metadata
+    """
+    try:
+        if supports_recursive:
+            metadata = query_metadata(metadata_url, headers,
+                                      expect_json)
+        else:
+            metadata = walk_metadata(metadata_url, headers,
+                                     expect_json)
+    except OpenShiftFactsMetadataUnavailableError:
+        metadata = None
+    return metadata
+
+
+def normalize_gce_facts(metadata, facts):
+    """ Normalize gce facts
+
+        Args:
+            metadata (dict): provider metadata
+            facts (dict): facts to update
+        Returns:
+            dict: the result of adding the normalized metadata to the provided
+                  facts dict
+    """
+    for interface in metadata['instance']['networkInterfaces']:
+        int_info = dict(ips=[interface['ip']], network_type='gce')
+        int_info['public_ips'] = [ac['externalIp'] for ac
+                                  in interface['accessConfigs']]
+        int_info['public_ips'].extend(interface['forwardedIps'])
+        _, _, network_id = interface['network'].rpartition('/')
+        int_info['network_id'] = network_id
+        facts['network']['interfaces'].append(int_info)
+    _, _, zone = metadata['instance']['zone'].rpartition('/')
+    facts['zone'] = zone
+    facts['external_id'] = metadata['instance']['id']
+
+    # Default to no sdn for GCE deployments
+    facts['use_openshift_sdn'] = False
+
+    # GCE currently only supports a single interface
+    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
+    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
+    facts['network']['public_ip'] = pub_ip
+    facts['network']['hostname'] = metadata['instance']['hostname']
+
+    # TODO: attempt to resolve public_hostname
+    facts['network']['public_hostname'] = facts['network']['public_ip']
+
+    return facts
+
+
+def normalize_aws_facts(metadata, facts):
+    """ Normalize aws facts
+
+        Args:
+            metadata (dict): provider metadata
+            facts (dict): facts to update
+        Returns:
+            dict: the result of adding the normalized metadata to the provided
+                  facts dict
+    """
+    for interface in sorted(
+            metadata['network']['interfaces']['macs'].values(),
+            key=lambda x: x['device-number']
+    ):
+        int_info = dict()
+        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
+        for ips_var, int_var in var_map.iteritems():
+            ips = interface[int_var]
+            if isinstance(ips, basestring):
+                int_info[ips_var] = [ips]
+            else:
+                int_info[ips_var] = ips
+        if 'vpc-id' in interface:
+            int_info['network_type'] = 'vpc'
+        else:
+            int_info['network_type'] = 'classic'
+        if int_info['network_type'] == 'vpc':
+            int_info['network_id'] = interface['subnet-id']
+        else:
+            int_info['network_id'] = None
+        facts['network']['interfaces'].append(int_info)
+    facts['zone'] = metadata['placement']['availability-zone']
+    facts['external_id'] = metadata['instance-id']
+
+    # TODO: actually attempt to determine default local and public ips
+    # by using the ansible default ip fact and the ipv4-associations
+    # from the ec2 metadata
+    facts['network']['ip'] = metadata['local-ipv4']
+    facts['network']['public_ip'] = metadata['public-ipv4']
+
+    # TODO: verify that local hostname makes sense and is resolvable
+    facts['network']['hostname'] = metadata['local-hostname']
+
+    # TODO: verify that public hostname makes sense and is resolvable
+    facts['network']['public_hostname'] = metadata['public-hostname']
+
+    return facts
+
+
+def normalize_openstack_facts(metadata, facts):
+    """ Normalize openstack facts
+
+        Args:
+            metadata (dict): provider metadata
+            facts (dict): facts to update
+        Returns:
+            dict: the result of adding the normalized metadata to the provided
+                  facts dict
+    """
+    # openstack ec2 compat api does not support network interfaces and
+    # the version tested on did not include the info in the openstack
+    # metadata api, should be updated if neutron exposes this.
+
+    facts['zone'] = metadata['availability_zone']
+    facts['external_id'] = metadata['uuid']
+    facts['network']['ip'] = metadata['ec2_compat']['local-ipv4']
+    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
+
+    # TODO: verify local hostname makes sense and is resolvable
+    facts['network']['hostname'] = metadata['hostname']
+
+    # TODO: verify that public hostname makes sense and is resolvable
+    pub_h = metadata['ec2_compat']['public-hostname']
+    facts['network']['public_hostname'] = pub_h
+
+    return facts
+
+
+def normalize_provider_facts(provider, metadata):
+    """ Normalize provider facts
+
+        Args:
+            provider (str): host provider
+            metadata (dict): provider metadata
+        Returns:
+            dict: the normalized provider facts
+    """
+    if provider is None or metadata is None:
+        return {}
+
+    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
+    # and configure ipv6 facts if available
+
+    # TODO: add support for setting user_data if available
+
+    facts = dict(name=provider, metadata=metadata,
+                 network=dict(interfaces=[], ipv6_enabled=False))
+    if provider == 'gce':
+        facts = normalize_gce_facts(metadata, facts)
+    elif provider == 'ec2':
+        facts = normalize_aws_facts(metadata, facts)
+    elif provider == 'openstack':
+        facts = normalize_openstack_facts(metadata, facts)
+    return facts
+
+
+def set_url_facts_if_unset(facts):
+    """ Set url facts if not already present in facts dict
+
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the generated url facts if they
+                  were not already present
+    """
+    if 'master' in facts:
+        for (url_var, use_ssl, port, default) in [
+                ('api_url',
+                 facts['master']['api_use_ssl'],
+                 facts['master']['api_port'],
+                 facts['common']['hostname']),
+                ('public_api_url',
+                 facts['master']['api_use_ssl'],
+                 facts['master']['api_port'],
+                 facts['common']['public_hostname']),
+                ('console_url',
+                 facts['master']['console_use_ssl'],
+                 facts['master']['console_port'],
+                 facts['common']['hostname']),
+                ('public_console_url',
+                 facts['master']['console_use_ssl'],
+                 facts['master']['console_port'],
+                 facts['common']['public_hostname'])]:
+            if url_var not in facts['master']:
+                scheme = 'https' if use_ssl else 'http'
+                netloc = default
+                if ((scheme == 'https' and port != '443')
+                        or (scheme == 'http' and port != '80')):
+                    netloc = "%s:%s" % (netloc, port)
+                facts['master'][url_var] = urlparse.urlunparse(
+                    (scheme, netloc, '', '', '', '')
+                )
+    return facts
+
+
+def get_current_config(facts):
+    """ Get current openshift config
+
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the current openshift config
+    """
+    current_config = dict()
+    roles = [role for role in facts if role not in ['common', 'provider']]
+    for role in roles:
+        if 'roles' in current_config:
+            current_config['roles'].append(role)
+        else:
+            current_config['roles'] = [role]
+
+        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
+        # determine the location of files.
+
+        # Query kubeconfig settings
+        kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+        if role == 'node':
+            kubeconfig_dir = os.path.join(
+                kubeconfig_dir, "node-%s" % facts['common']['hostname']
+            )
+
+        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
+        if (os.path.isfile('/usr/bin/openshift')
+                and os.path.isfile(kubeconfig_path)):
+            try:
+                _, output, _ = module.run_command(
+                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
+                     "json", "--kubeconfig=%s" % kubeconfig_path],
+                    check_rc=False
+                )
+                config = json.loads(output)
+
+                cad = 'certificate-authority-data'
+                try:
+                    for cluster in config['clusters']:
+                        config['clusters'][cluster][cad] = 'masked'
+                except KeyError:
+                    pass
+                try:
+                    for user in config['users']:
+                        config['users'][user][cad] = 'masked'
+                        config['users'][user]['client-key-data'] = 'masked'
+                except KeyError:
+                    pass
+
+                current_config['kubeconfig'] = config
+
+            # override pylint broad-except warning, since we do not want
+            # to bubble up any exceptions if openshift ex config view
+            # fails
+            # pylint: disable=broad-except
+            except Exception:
+                pass
+
+    return current_config
+
+
+def apply_provider_facts(facts, provider_facts, roles):
+    """ Apply provider facts to supplied facts dict
+
+        Args:
+            facts (dict): facts dict to update
+            provider_facts (dict): provider facts to apply
+            roles: host roles
+        Returns:
+            dict: the merged facts
+    """
+    if not provider_facts:
+        return facts
+
+    use_openshift_sdn = provider_facts.get('use_openshift_sdn')
+    if isinstance(use_openshift_sdn, bool):
+        facts['common']['use_openshift_sdn'] = use_openshift_sdn
+
+    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
+    for h_var, ip_var in common_vars:
+        ip_value = provider_facts['network'].get(ip_var)
+        if ip_value:
+            facts['common'][ip_var] = ip_value
+
+        facts['common'][h_var] = choose_hostname(
+            [provider_facts['network'].get(h_var)],
+            facts['common'][ip_var]
+        )
+
+    if 'node' in roles:
+        ext_id = provider_facts.get('external_id')
+        if ext_id:
+            facts['node']['external_id'] = ext_id
+
+    facts['provider'] = provider_facts
+    return facts
+
+
+def merge_facts(orig, new):
+    """ Recursively merge facts dicts
+
+        Args:
+            orig (dict): existing facts
+            new (dict): facts to update
+        Returns:
+            dict: the merged facts
+    """
+    facts = dict()
+    for key, value in orig.iteritems():
+        if key in new:
+            if isinstance(value, dict):
+                facts[key] = merge_facts(value, new[key])
+            else:
+                facts[key] = copy.copy(new[key])
+        else:
+            facts[key] = copy.deepcopy(value)
+    new_keys = set(new.keys()) - set(orig.keys())
+    for key in new_keys:
+        facts[key] = copy.deepcopy(new[key])
+    return facts
+
+
+def save_local_facts(filename, facts):
+    """ Save local facts
+
+        Args:
+            filename (str): local facts file
+            facts (dict): facts to set
+    """
+    try:
+        fact_dir = os.path.dirname(filename)
+        if not os.path.exists(fact_dir):
+            os.makedirs(fact_dir)
+        with open(filename, 'w') as fact_file:
+            fact_file.write(module.jsonify(facts))
+    except (IOError, OSError) as ex:
+        raise OpenShiftFactsFileWriteError(
+            "Could not create fact file: %s, error: %s" % (filename, ex)
+        )
+
+
+def get_local_facts_from_file(filename):
+    """ Retrieve local facts from fact file
+
+        Args:
+            filename (str): local facts file
+        Returns:
+            dict: the retrieved facts
+    """
+    local_facts = dict()
+    try:
+        # Handle conversion of INI style facts file to json style
+        ini_facts = ConfigParser.SafeConfigParser()
+        ini_facts.read(filename)
+        for section in ini_facts.sections():
+            local_facts[section] = dict()
+            for key, value in ini_facts.items(section):
+                local_facts[section][key] = value
+
+    except (ConfigParser.MissingSectionHeaderError,
+            ConfigParser.ParsingError):
+        try:
+            with open(filename, 'r') as facts_file:
+                local_facts = json.load(facts_file)
+        except (ValueError, IOError):
+            pass
+
+    return local_facts
+
+
 class OpenShiftFactsUnsupportedRoleError(Exception):
+    """OpenShift Facts Unsupported Role Error"""
     pass
 
+
 class OpenShiftFactsFileWriteError(Exception):
+    """OpenShift Facts File Write Error"""
     pass
 
+
 class OpenShiftFactsMetadataUnavailableError(Exception):
+    """OpenShift Facts Metadata Unavailable Error"""
     pass
 
+
 class OpenShiftFacts(object):
+    """ OpenShift Facts
+
+        Attributes:
+            facts (dict): OpenShift facts for the host
+
+        Args:
+            role (str): role for setting local facts
+            filename (str): local facts file to use
+            local_facts (dict): local facts to set
+
+        Raises:
+            OpenShiftFactsUnsupportedRoleError:
+    """
     known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
 
     def __init__(self, role, filename, local_facts):
@@ -48,162 +542,35 @@ class OpenShiftFacts(object):
         self.facts = self.generate_facts(local_facts)
 
     def generate_facts(self, local_facts):
+        """ Generate facts
+
+            Args:
+                local_facts (dict): local_facts for overriding generated
+                                    defaults
+
+            Returns:
+                dict: The generated facts
+        """
         local_facts = self.init_local_facts(local_facts)
         roles = local_facts.keys()
 
         defaults = self.get_defaults(roles)
         provider_facts = self.init_provider_facts()
-        facts = self.apply_provider_facts(defaults, provider_facts, roles)
-        facts = self.merge_facts(facts, local_facts)
-        facts['current_config'] = self.current_config(facts)
-        self.set_url_facts_if_unset(facts)
+        facts = apply_provider_facts(defaults, provider_facts, roles)
+        facts = merge_facts(facts, local_facts)
+        facts['current_config'] = get_current_config(facts)
+        facts = set_url_facts_if_unset(facts)
         return dict(openshift=facts)
 
+    def get_defaults(self, roles):
+        """ Get default fact values
 
-    def set_url_facts_if_unset(self, facts):
-        if 'master' in facts:
-            for (url_var, use_ssl, port, default) in [
-                    ('api_url',
-                     facts['master']['api_use_ssl'],
-                     facts['master']['api_port'],
-                     facts['common']['hostname']),
-                    ('public_api_url',
-                     facts['master']['api_use_ssl'],
-                     facts['master']['api_port'],
-                     facts['common']['public_hostname']),
-                    ('console_url',
-                     facts['master']['console_use_ssl'],
-                     facts['master']['console_port'],
-                     facts['common']['hostname']),
-                    ('public_console_url' 'console_use_ssl',
-                     facts['master']['console_use_ssl'],
-                     facts['master']['console_port'],
-                     facts['common']['public_hostname'])]:
-                if url_var not in facts['master']:
-                    scheme = 'https' if use_ssl else 'http'
-                    netloc = default
-                    if ((scheme == 'https' and port != '443')
-                            or (scheme == 'http' and port != '80')):
-                        netloc = "%s:%s" % (netloc, port)
-                    facts['master'][url_var] = urlparse.urlunparse(
-                        (scheme, netloc, '', '', '', '')
-                    )
-
-
-    # Query current OpenShift config and return a dictionary containing
-    # settings that may be valuable for determining actions that need to be
-    # taken in the playbooks/roles
-    def current_config(self, facts):
-        current_config = dict()
-        roles = [role for role in facts if role not in ['common', 'provider']]
-        for role in roles:
-            if 'roles' in current_config:
-                current_config['roles'].append(role)
-            else:
-                current_config['roles'] = [role]
-
-            # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
-            # determine the location of files.
-
-            # Query kubeconfig settings
-            kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
-            if role == 'node':
-                kubeconfig_dir = os.path.join(
-                    kubeconfig_dir, "node-%s" % facts['common']['hostname']
-                )
-
-            kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
-            if (os.path.isfile('/usr/bin/openshift')
-                    and os.path.isfile(kubeconfig_path)):
-                try:
-                    _, output, _ = module.run_command(
-                        ["/usr/bin/openshift", "ex", "config", "view", "-o",
-                         "json", "--kubeconfig=%s" % kubeconfig_path],
-                        check_rc=False
-                    )
-                    config = json.loads(output)
-
-                    cad = 'certificate-authority-data'
-                    try:
-                        for cluster in config['clusters']:
-                            config['clusters'][cluster][cad] = 'masked'
-                    except KeyError:
-                        pass
-                    try:
-                        for user in config['users']:
-                            config['users'][user][cad] = 'masked'
-                            config['users'][user]['client-key-data'] = 'masked'
-                    except KeyError:
-                        pass
-
-                    current_config['kubeconfig'] = config
-
-                # override pylint broad-except warning, since we do not want
-                # to bubble up any exceptions if openshift ex config view
-                # fails
-                # pylint: disable=broad-except
-                except Exception:
-                    pass
-
-        return current_config
-
-
-    def apply_provider_facts(self, facts, provider_facts, roles):
-        if not provider_facts:
-            return facts
-
-        use_openshift_sdn = provider_facts.get('use_openshift_sdn')
-        if isinstance(use_openshift_sdn, bool):
-            facts['common']['use_openshift_sdn'] = use_openshift_sdn
-
-        common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
-        for h_var, ip_var in common_vars:
-            ip_value = provider_facts['network'].get(ip_var)
-            if ip_value:
-                facts['common'][ip_var] = ip_value
-
-            facts['common'][h_var] = self.choose_hostname(
-                [provider_facts['network'].get(h_var)],
-                facts['common'][ip_var]
-            )
-
-        if 'node' in roles:
-            ext_id = provider_facts.get('external_id')
-            if ext_id:
-                facts['node']['external_id'] = ext_id
-
-        facts['provider'] = provider_facts
-        return facts
-
-    def hostname_valid(self, hostname):
-        if (not hostname or
-                hostname.startswith('localhost') or
-                hostname.endswith('localdomain') or
-                len(hostname.split('.')) < 2):
-            return False
-
-        return True
-
-    def choose_hostname(self, hostnames=None, fallback=''):
-        hostname = fallback
-        if hostnames is None:
-            return hostname
-
-        ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
-        ips = [i for i in hostnames
-               if (i is not None and isinstance(i, basestring)
-                   and re.match(ip_regex, i))]
-        hosts = [i for i in hostnames
-                 if i is not None and i != '' and i not in ips]
-
-        for host_list in (hosts, ips):
-            for host in host_list:
-                if self.hostname_valid(host):
-                    return host
-
-        return hostname
+            Args:
+                roles (list): list of roles for this host
 
-    def get_defaults(self, roles):
+            Returns:
+                dict: The generated default facts
+        """
         defaults = dict()
 
         common = dict(use_openshift_sdn=True)
@@ -215,16 +582,13 @@ class OpenShiftFacts(object):
         hostname_f = output.strip() if exit_code == 0 else ''
         hostname_values = [hostname_f, self.system_facts['nodename'],
                            self.system_facts['fqdn']]
-        hostname = self.choose_hostname(hostname_values)
+        hostname = choose_hostname(hostname_values)
 
         common['hostname'] = hostname
         common['public_hostname'] = hostname
         defaults['common'] = common
 
         if 'master' in roles:
-            # TODO: provide for a better way to override just the port, or just
-            # the urls, instead of forcing both, also to override the hostname
-            # without having to re-generate these urls later
             master = dict(api_use_ssl=True, api_port='8443',
                           console_use_ssl=True, console_path='/console',
                           console_port='8443', etcd_use_ssl=False,
@@ -242,69 +606,12 @@ class OpenShiftFacts(object):
 
         return defaults
 
-    def merge_facts(self, orig, new):
-        facts = dict()
-        for key, value in orig.iteritems():
-            if key in new:
-                if isinstance(value, dict):
-                    facts[key] = self.merge_facts(value, new[key])
-                else:
-                    facts[key] = copy.copy(new[key])
-            else:
-                facts[key] = copy.deepcopy(value)
-        new_keys = set(new.keys()) - set(orig.keys())
-        for key in new_keys:
-            facts[key] = copy.deepcopy(new[key])
-        return facts
-
-    def query_metadata(self, metadata_url, headers=None, expect_json=False):
-        result, info = fetch_url(module, metadata_url, headers=headers)
-        if info['status'] != 200:
-            raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
-        if expect_json:
-            return module.from_json(result.read())
-        else:
-            return [line.strip() for line in result.readlines()]
-
-    def walk_metadata(self, metadata_url, headers=None, expect_json=False):
-        metadata = dict()
-
-        for line in self.query_metadata(metadata_url, headers, expect_json):
-            if line.endswith('/') and not line == 'public-keys/':
-                key = line[:-1]
-                metadata[key] = self.walk_metadata(metadata_url + line,
-                                                   headers, expect_json)
-            else:
-                results = self.query_metadata(metadata_url + line, headers,
-                                              expect_json)
-                if len(results) == 1:
-                    # disable pylint maybe-no-member because overloaded use of
-                    # the module name causes pylint to not detect that results
-                    # is an array or hash
-                    # pylint: disable=maybe-no-member
-                    metadata[line] = results.pop()
-                else:
-                    metadata[line] = results
-        return metadata
-
-    def get_provider_metadata(self, metadata_url, supports_recursive=False,
-                              headers=None, expect_json=False):
-        try:
-            if supports_recursive:
-                metadata = self.query_metadata(metadata_url, headers,
-                                               expect_json)
-            else:
-                metadata = self.walk_metadata(metadata_url, headers,
-                                              expect_json)
-        except OpenShiftFactsMetadataUnavailableError:
-            metadata = None
-        return metadata
-
-    # TODO: refactor to reduce the size of this method, potentially create
-    # sub-methods (or classes for the different providers)
-    # temporarily disable pylint too-many-statements
-    # pylint: disable=too-many-statements
     def guess_host_provider(self):
+        """ Guess the host provider
+
+            Returns:
+                dict: The generated default facts for the detected provider
+        """
         # TODO: cloud provider facts should probably be submitted upstream
         product_name = self.system_facts['product_name']
         product_version = self.system_facts['product_version']
@@ -323,8 +630,8 @@ class OpenShiftFacts(object):
             metadata_url = ('http://metadata.google.internal/'
                             'computeMetadata/v1/?recursive=true')
             headers = {'Metadata-Flavor': 'Google'}
-            metadata = self.get_provider_metadata(metadata_url, True, headers,
-                                                  True)
+            metadata = get_provider_metadata(metadata_url, True, headers,
+                                             True)
 
             # Filter sshKeys and serviceAccounts from gce metadata
             if metadata:
@@ -334,17 +641,17 @@ class OpenShiftFacts(object):
               and re.match(r'.*\.amazon$', product_version)):
             provider = 'ec2'
             metadata_url = 'http://169.254.169.254/latest/meta-data/'
-            metadata = self.get_provider_metadata(metadata_url)
+            metadata = get_provider_metadata(metadata_url)
         elif re.search(r'OpenStack', product_name):
             provider = 'openstack'
             metadata_url = ('http://169.254.169.254/openstack/latest/'
                             'meta_data.json')
-            metadata = self.get_provider_metadata(metadata_url, True, None,
-                                                  True)
+            metadata = get_provider_metadata(metadata_url, True, None,
+                                             True)
 
             if metadata:
                 ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
-                metadata['ec2_compat'] = self.get_provider_metadata(
+                metadata['ec2_compat'] = get_provider_metadata(
                     ec2_compat_url
                 )
 
@@ -361,140 +668,42 @@ class OpenShiftFacts(object):
 
         return dict(name=provider, metadata=metadata)
 
-    def normalize_provider_facts(self, provider, metadata):
-        if provider is None or metadata is None:
-            return {}
-
-        # TODO: test for ipv6_enabled where possible (gce, aws do not support)
-        # and configure ipv6 facts if available
-
-        # TODO: add support for setting user_data if available
-
-        facts = dict(name=provider, metadata=metadata)
-        network = dict(interfaces=[], ipv6_enabled=False)
-        if provider == 'gce':
-            for interface in metadata['instance']['networkInterfaces']:
-                int_info = dict(ips=[interface['ip']], network_type=provider)
-                int_info['public_ips'] = [ac['externalIp'] for ac
-                                          in interface['accessConfigs']]
-                int_info['public_ips'].extend(interface['forwardedIps'])
-                _, _, network_id = interface['network'].rpartition('/')
-                int_info['network_id'] = network_id
-                network['interfaces'].append(int_info)
-            _, _, zone = metadata['instance']['zone'].rpartition('/')
-            facts['zone'] = zone
-            facts['external_id'] = metadata['instance']['id']
-
-            # Default to no sdn for GCE deployments
-            facts['use_openshift_sdn'] = False
-
-            # GCE currently only supports a single interface
-            network['ip'] = network['interfaces'][0]['ips'][0]
-            network['public_ip'] = network['interfaces'][0]['public_ips'][0]
-            network['hostname'] = metadata['instance']['hostname']
-
-            # TODO: attempt to resolve public_hostname
-            network['public_hostname'] = network['public_ip']
-        elif provider == 'ec2':
-            for interface in sorted(
-                    metadata['network']['interfaces']['macs'].values(),
-                    key=lambda x: x['device-number']
-            ):
-                int_info = dict()
-                var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
-                for ips_var, int_var in var_map.iteritems():
-                    ips = interface[int_var]
-                    if isinstance(ips, basestring):
-                        int_info[ips_var] = [ips]
-                    else:
-                        int_info[ips_var] = ips
-                if 'vpc-id' in interface:
-                    int_info['network_type'] = 'vpc'
-                else:
-                    int_info['network_type'] = 'classic'
-                if int_info['network_type'] == 'vpc':
-                    int_info['network_id'] = interface['subnet-id']
-                else:
-                    int_info['network_id'] = None
-                network['interfaces'].append(int_info)
-            facts['zone'] = metadata['placement']['availability-zone']
-            facts['external_id'] = metadata['instance-id']
-
-            # TODO: actually attempt to determine default local and public ips
-            # by using the ansible default ip fact and the ipv4-associations
-            # form the ec2 metadata
-            network['ip'] = metadata['local-ipv4']
-            network['public_ip'] = metadata['public-ipv4']
-
-            # TODO: verify that local hostname makes sense and is resolvable
-            network['hostname'] = metadata['local-hostname']
-
-            # TODO: verify that public hostname makes sense and is resolvable
-            network['public_hostname'] = metadata['public-hostname']
-        elif provider == 'openstack':
-            # openstack ec2 compat api does not support network interfaces and
-            # the version tested on did not include the info in the openstack
-            # metadata api, should be updated if neutron exposes this.
-
-            facts['zone'] = metadata['availability_zone']
-            facts['external_id'] = metadata['uuid']
-            network['ip'] = metadata['ec2_compat']['local-ipv4']
-            network['public_ip'] = metadata['ec2_compat']['public-ipv4']
-
-            # TODO: verify local hostname makes sense and is resolvable
-            network['hostname'] = metadata['hostname']
-
-            # TODO: verify that public hostname makes sense and is resolvable
-            pub_h = metadata['ec2_compat']['public-hostname']
-            network['public_hostname'] = pub_h
-
-        facts['network'] = network
-        return facts
-
     def init_provider_facts(self):
+        """ Initialize the provider facts
+
+            Returns:
+                dict: The normalized provider facts
+        """
         provider_info = self.guess_host_provider()
-        provider_facts = self.normalize_provider_facts(
+        provider_facts = normalize_provider_facts(
             provider_info.get('name'),
             provider_info.get('metadata')
         )
         return provider_facts
 
-    def get_facts(self):
-        # TODO: transform facts into cleaner format (openshift_<blah> instead
-        # of openshift.<blah>
-        return self.facts
-
     def init_local_facts(self, facts=None):
+        """ Initialize the local facts
+
+            Args:
+                facts (dict): local facts to set
+
+            Returns:
+                dict: The result of merging the provided facts with existing
+                      local facts
+        """
         changed = False
         facts_to_set = {self.role: dict()}
         if facts is not None:
             facts_to_set[self.role] = facts
 
-        # Handle conversion of INI style facts file to json style
-        local_facts = dict()
-        try:
-            ini_facts = ConfigParser.SafeConfigParser()
-            ini_facts.read(self.filename)
-            for section in ini_facts.sections():
-                local_facts[section] = dict()
-                for key, value in ini_facts.items(section):
-                    local_facts[section][key] = value
-
-        except (ConfigParser.MissingSectionHeaderError,
-                ConfigParser.ParsingError):
-            try:
-                with open(self.filename, 'r') as facts_file:
-                    local_facts = json.load(facts_file)
-
-            except (ValueError, IOError) as ex:
-                pass
+        local_facts = get_local_facts_from_file(self.filename)
 
         for arg in ['labels', 'annotations']:
             if arg in facts_to_set and isinstance(facts_to_set[arg],
                                                   basestring):
                 facts_to_set[arg] = module.from_json(facts_to_set[arg])
 
-        new_local_facts = self.merge_facts(local_facts, facts_to_set)
+        new_local_facts = merge_facts(local_facts, facts_to_set)
         for facts in new_local_facts.values():
             keys_to_delete = []
             for fact, value in facts.iteritems():
@@ -507,22 +716,14 @@ class OpenShiftFacts(object):
             changed = True
 
             if not module.check_mode:
-                try:
-                    fact_dir = os.path.dirname(self.filename)
-                    if not os.path.exists(fact_dir):
-                        os.makedirs(fact_dir)
-                    with open(self.filename, 'w') as fact_file:
-                        fact_file.write(module.jsonify(new_local_facts))
-                except (IOError, OSError) as ex:
-                    raise OpenShiftFactsFileWriteError(
-                        "Could not create fact file: "
-                        "%s, error: %s" % (self.filename, ex)
-                    )
+                save_local_facts(self.filename, new_local_facts)
+
         self.changed = changed
         return new_local_facts
 
 
 def main():
+    """ main """
     # disabling pylint errors for global-variable-undefined and invalid-name
     # for 'global module' usage, since it is required to use ansible_facts
     # pylint: disable=global-variable-undefined, invalid-name
@@ -550,7 +751,7 @@ def main():
                                                     openshift_facts.changed)
 
     return module.exit_json(changed=changed,
-                            ansible_facts=openshift_facts.get_facts())
+                            ansible_facts=openshift_facts.facts)
 
 # ignore pylint errors related to the module_utils import
 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-- 
cgit v1.2.3


From c28d4ec46ff9152ae5c91837cc29423805af6bf3 Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Wed, 6 May 2015 16:17:13 -0400
Subject: added '-e all' to ohi and fixed pylint errors.

---
 bin/ohi                          |   4 +-
 bin/openshift_ansible/awsutil.py | 138 +++++++++++++++++++++++++++------------
 2 files changed, 100 insertions(+), 42 deletions(-)

diff --git a/bin/ohi b/bin/ohi
index bb52166df..24a027be2 100755
--- a/bin/ohi
+++ b/bin/ohi
@@ -47,12 +47,12 @@ class Ohi(object):
            self.args.env is not None:
             # Both env and host-type specified
             hosts = self.aws.get_host_list(host_type=self.args.host_type, \
-                                           env=self.args.env)
+                                           envs=self.args.env)
 
         if self.args.host_type is None and \
            self.args.env is not None:
             # Only env specified
-            hosts = self.aws.get_host_list(env=self.args.env)
+            hosts = self.aws.get_host_list(envs=self.args.env)
 
         if self.args.host_type is not None and \
            self.args.env is None:
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 65b269930..8b365faa9 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -1,16 +1,37 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
+"""This module comprises Aws specific utility functions."""
+
 import subprocess
 import os
 import json
 import re
 
 class ArgumentError(Exception):
+    """This class is raised when improper arguments are passed."""
+
     def __init__(self, message):
+        """Initialize an ArgumentError.
+
+        Keyword arguments:
+        message -- the exact error message being raised
+        """
+        super(ArgumentError, self).__init__()
         self.message = message
 
 class AwsUtil(object):
-    def __init__(self, inventory_path=None, host_type_aliases={}):
+    """This class contains the AWS utility functions."""
+
+    def __init__(self, inventory_path=None, host_type_aliases=None):
+        """Initialize the AWS utility class.
+
+        Keyword arguments:
+        inventory_path    -- the path to find the inventory script
+        host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
+        """
+
+        host_type_aliases = host_type_aliases or {}
+
         self.host_type_aliases = host_type_aliases
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
@@ -26,6 +47,7 @@ class AwsUtil(object):
         self.setup_host_type_alias_lookup()
 
     def setup_host_type_alias_lookup(self):
+        """Sets up the alias to host-type lookup table."""
         self.alias_lookup = {}
         for key, values in self.host_type_aliases.iteritems():
             for value in values:
@@ -33,7 +55,13 @@ class AwsUtil(object):
 
 
 
-    def get_inventory(self,args=[]):
+    def get_inventory(self, args=None):
+        """Calls the inventory script and returns a dictionary containing the inventory.
+
+        Keyword arguments:
+        args -- optional arguments to pass to the inventory script
+        """
+        args = args or []
         cmd = [self.inventory_path]
 
         if args:
@@ -41,73 +69,78 @@ class AwsUtil(object):
 
         env = os.environ
 
-        p = subprocess.Popen(cmd, stderr=subprocess.PIPE,
-                         stdout=subprocess.PIPE, env=env)
+        proc = subprocess.Popen(cmd, stderr=subprocess.PIPE,
+                                stdout=subprocess.PIPE, env=env)
 
-        out,err = p.communicate()
+        out, err = proc.communicate()
 
-        if p.returncode != 0:
+        if proc.returncode != 0:
             raise RuntimeError(err)
 
         return json.loads(out.strip())
 
     def get_environments(self):
+        """Searches for env tags in the inventory and returns all of the envs found."""
         pattern = re.compile(r'^tag_environment_(.*)')
 
         envs = []
         inv = self.get_inventory()
         for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                envs.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                envs.append(matched.group(1))
 
         envs.sort()
         return envs
 
     def get_host_types(self):
+        """Searches for host-type tags in the inventory and returns all host-types found."""
         pattern = re.compile(r'^tag_host-type_(.*)')
 
         host_types = []
         inv = self.get_inventory()
         for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                host_types.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                host_types.append(matched.group(1))
 
         host_types.sort()
         return host_types
 
     def get_security_groups(self):
+        """Searches for security_groups in the inventory and returns all SGs found."""
         pattern = re.compile(r'^security_group_(.*)')
 
         groups = []
         inv = self.get_inventory()
         for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                groups.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                groups.append(matched.group(1))
 
         groups.sort()
         return groups
 
-    def build_host_dict_by_env(self, args=[]):
+    def build_host_dict_by_env(self, args=None):
+        """Searches the inventory for hosts in an env and returns their hostvars."""
+        args = args or []
         inv = self.get_inventory(args)
 
         inst_by_env = {}
-        for dns, host in inv['_meta']['hostvars'].items():
+        for _, host in inv['_meta']['hostvars'].items():
             # If you don't have an environment tag, we're going to ignore you
             if 'ec2_tag_environment' not in host:
                 continue
 
             if host['ec2_tag_environment'] not in inst_by_env:
                 inst_by_env[host['ec2_tag_environment']] = {}
-            host_id = "%s:%s" % (host['ec2_tag_Name'],host['ec2_id'])
+            host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
             inst_by_env[host['ec2_tag_environment']][host_id] = host
 
         return inst_by_env
 
-    # Display host_types
     def print_host_types(self):
+        """Gets the list of host types and aliases and outputs them in columns."""
         host_types = self.get_host_types()
         ht_format_str = "%35s"
         alias_format_str = "%-20s"
@@ -117,22 +150,31 @@ class AwsUtil(object):
         print combined_format_str % ('Host Types', 'Aliases')
         print combined_format_str % ('----------', '-------')
 
-        for ht in host_types:
+        for host_type in host_types:
             aliases = []
-            if ht in self.host_type_aliases:
-                aliases = self.host_type_aliases[ht]
-                print combined_format_str % (ht, ", ".join(aliases))
+            if host_type in self.host_type_aliases:
+                aliases = self.host_type_aliases[host_type]
+                print combined_format_str % (host_type, ", ".join(aliases))
             else:
-                print  ht_format_str % ht
+                print  ht_format_str % host_type
         print
 
-    # Convert host-type aliases to real a host-type
     def resolve_host_type(self, host_type):
+        """Converts a host-type alias into a host-type.
+
+        Keyword arguments:
+        host_type -- The alias or host_type to look up.
+
+        Example (depends on aliases defined in config file):
+            host_type = ex-node
+            returns: openshift-node
+        """
         if self.alias_lookup.has_key(host_type):
             return self.alias_lookup[host_type]
         return host_type
 
-    def gen_env_tag(self, env):
+    @staticmethod
+    def gen_env_tag(env):
         """Generate the environment tag
         """
         return "tag_environment_%s" % env
@@ -149,28 +191,44 @@ class AwsUtil(object):
         host_type = self.resolve_host_type(host_type)
         return "tag_env-host-type_%s-%s" % (env, host_type)
 
-    def get_host_list(self, host_type=None, env=None):
+    def get_host_list(self, host_type=None, envs=None):
         """Get the list of hosts from the inventory using host-type and environment
         """
+        envs = envs or []
         inv = self.get_inventory()
 
-        if host_type is not None and \
-           env is not None:
-            # Both host type and environment were specified
-            env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
-            return inv[env_host_type_tag]
+        # We prefer to deal with a list of environments
+        if issubclass(type(envs), basestring):
+            if envs == 'all':
+                envs = self.get_environments()
+            else:
+                envs = [envs]
 
-        if host_type is None and \
-           env is not None:
+        if host_type and envs:
+            # Both host type and environment were specified
+            retval = []
+            for env in envs:
+                env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+                if env_host_type_tag in inv.keys():
+                    retval += inv[env_host_type_tag]
+            return set(retval)
+
+        if envs and not host_type:
             # Just environment was specified
-            host_type_tag = self.gen_env_tag(env)
-            return inv[host_type_tag]
-
-        if host_type is not None and \
-           env is None:
+            retval = []
+            for env in envs:
+                env_tag = AwsUtil.gen_env_tag(env)
+                if env_tag in inv.keys():
+                    retval += inv[env_tag]
+            return set(retval)
+
+        if host_type and not envs:
             # Just host-type was specified
+            retval = []
             host_type_tag = self.gen_host_type_tag(host_type)
-            return inv[host_type_tag]
+            if host_type_tag in inv.keys():
+                retval = inv[host_type_tag]
+            return set(retval)
 
         # We should never reach here!
         raise ArgumentError("Invalid combination of parameters")
-- 
cgit v1.2.3


From 060dffb973687776a22b5ff34e4843744ca82b05 Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Thu, 7 May 2015 14:22:16 -0400
Subject: Automatic commit of package [openshift-ansible-bin] release
 [0.0.13-1].

---
 bin/openshift-ansible-bin.spec         | 5 ++++-
 rel-eng/packages/openshift-ansible-bin | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index 29aaff9ae..9fc79fe6c 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.12
+Version:       0.0.13
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,9 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
+- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
+
 * Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
 - fixed opssh and opscp to allow just environment or just host-type.
   (twiest@redhat.com)
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index 8a9624397..9d95e8cdd 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.12-1 bin/
+0.0.13-1 bin/
-- 
cgit v1.2.3


From b54e8f81469807cdd6cc57d3c03b22ee1212b4cc Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Thu, 7 May 2015 11:09:00 -0400
Subject: Allow option in multi_ec2 to set cache location.

---
 inventory/multi_ec2.py           | 26 ++++++++++++++++++++++++--
 inventory/multi_ec2.yaml.example |  3 +++
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index b839a33ea..b99212dd5 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -11,9 +11,12 @@ import yaml
 import os
 import subprocess
 import json
+import errno
+import fcntl
 
 
 CONFIG_FILE_NAME = 'multi_ec2.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
 
 class MultiEc2(object):
     '''
@@ -27,7 +30,6 @@ class MultiEc2(object):
         self.config = None
         self.all_ec2_results = {}
         self.result = {}
-        self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
@@ -48,10 +50,12 @@ class MultiEc2(object):
             self.config = self.load_yaml_config()
         elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
              os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+            # Build a default config
             self.config = {}
             self.config['accounts'] = [
                 {
                     'name': 'default',
+                    'cache_location': DEFAULT_CACHE_PATH,
                     'provider': 'aws/hosts/ec2.py',
                     'env_vars': {
                         'AWS_ACCESS_KEY_ID':     os.environ["AWS_ACCESS_KEY_ID"],
@@ -64,6 +68,11 @@ class MultiEc2(object):
         else:
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
+        # Set the default cache path but if it's defined we'll assign it.
+        self.cache_path = DEFAULT_CACHE_PATH
+        if self.config.has_key('cache_location'):
+            self.cache_path = self.config['cache_location']
+
         if self.args.refresh_cache:
             self.get_inventory()
             self.write_to_cache()
@@ -222,9 +231,22 @@ class MultiEc2(object):
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
 
+        # if it does not exist, try and create it.
+        if not os.path.isfile(self.cache_path):
+            path = os.path.dirname(self.cache_path)
+            try:
+                os.makedirs(path)
+            except OSError as exc:
+                if exc.errno != errno.EEXIST or not os.path.isdir(path):
+                    raise
+
         json_data = MultiEc2.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
-            cache.write(json_data)
+            try:
+                fcntl.flock(cache, fcntl.LOCK_EX)
+                cache.write(json_data)
+            finally:
+                fcntl.flock(cache, fcntl.LOCK_UN)
 
     def get_inventory_from_cache(self):
         ''' Reads the inventory from the cache file and returns it as a JSON
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 91e7c7970..d8361a49f 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -1,4 +1,7 @@
 # multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
+
 accounts:
   - name: aws1
     provider: aws/hosts/ec2.py
-- 
cgit v1.2.3


From 23a645ada56ef405a476db1f616c8389a4b6639d Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Thu, 7 May 2015 16:44:47 -0400
Subject: fixed build problems with openshift-ansible-inventory.spec

---
 inventory/openshift-ansible-inventory.spec | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index 8267e16f6..c3b6aa8c8 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -25,8 +25,8 @@ mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
 
 cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
 cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+cp -p aws/hosts/ec2.py aws/hosts/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 
 %files
 %config(noreplace) /etc/ansible/*
-- 
cgit v1.2.3


From 9cf97c888698a6fda1a03f0eb5ae5bd74ee2408b Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Thu, 7 May 2015 16:48:25 -0400
Subject: Automatic commit of package [openshift-ansible-inventory] release
 [0.0.3-1].

---
 inventory/openshift-ansible-inventory.spec   | 9 ++++++++-
 rel-eng/packages/openshift-ansible-inventory | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index c3b6aa8c8..f7ca67138 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.2
+Version:       0.0.3
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -37,6 +37,13 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- fixed build problems with openshift-ansible-inventory.spec
+  (twiest@redhat.com)
+- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
+- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
+- Adding refresh-cache option and cleanup for pylint. Also updated for
+  aws/hosts/ being added. (kwoodson@redhat.com)
 * Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
 - added the ability to have a config file in /etc/openshift_ansible to
   multi_ec2.py. (twiest@redhat.com)
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index cf3ac87ed..1ea7a9a81 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.2-1 inventory/
+0.0.3-1 inventory/
-- 
cgit v1.2.3


From c6c0463a54af4bdab6810697a1ab5f81ef782f4d Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Thu, 7 May 2015 17:36:08 -0400
Subject: Fixed a bug due to renaming of variables.

---
 inventory/multi_ec2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index b839a33ea..35ac6c7a8 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -182,7 +182,7 @@ class MultiEc2(object):
                 elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
                     for result in input_b[key]:
                         if result not in input_a[key]:
-                            input_a[key].input_append(result)
+                            input_a[key].append(result)
                 # a is a list and not b
                 elif isinstance(input_a[key], list):
                     if input_b[key] not in input_a[key]:
-- 
cgit v1.2.3


From 4705af597cee2ff523aaede4bb2479c0fc6af430 Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Thu, 7 May 2015 18:47:03 -0400
Subject: Automatic commit of package [openshift-ansible-inventory] release
 [0.0.4-1].

---
 inventory/openshift-ansible-inventory.spec   | 5 ++++-
 rel-eng/packages/openshift-ansible-inventory | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index f7ca67138..e6bf37988 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.3
+Version:       0.0.4
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -37,6 +37,9 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
+
 * Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
 - fixed build problems with openshift-ansible-inventory.spec
   (twiest@redhat.com)
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index 1ea7a9a81..9a0a5af4e 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.3-1 inventory/
+0.0.4-1 inventory/
-- 
cgit v1.2.3


From aa39235f5c938d7535aa71669b7c1bb0727e1c04 Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Fri, 8 May 2015 12:25:58 -0400
Subject: renamed ansible.cfg to ansible.cfg.example to avoid ansible
 accidentally picking this up as its config file.

---
 ansible.cfg         | 23 -----------------------
 ansible.cfg.example | 23 +++++++++++++++++++++++
 2 files changed, 23 insertions(+), 23 deletions(-)
 delete mode 100644 ansible.cfg
 create mode 100644 ansible.cfg.example

diff --git a/ansible.cfg b/ansible.cfg
deleted file mode 100644
index 6a7722ad8..000000000
--- a/ansible.cfg
+++ /dev/null
@@ -1,23 +0,0 @@
-# config file for ansible -- http://ansible.com/
-# ==============================================
-
-# This config file provides examples for running
-# the OpenShift playbooks with the provided
-# inventory scripts. Only global defaults are
-# left uncommented
-
-[defaults]
-# Add the roles directory to the roles path
-roles_path = roles/
-
-# Set the log_path
-log_path = /tmp/ansible.log
-
-# Uncomment to use the provided BYO inventory
-#hostfile = inventory/byo/hosts
-
-# Uncomment to use the provided GCE dynamic inventory script
-#hostfile = inventory/gce/gce.py
-
-# Uncomment to use the provided AWS dynamic inventory script
-#hostfile = inventory/aws/ec2.py
diff --git a/ansible.cfg.example b/ansible.cfg.example
new file mode 100644
index 000000000..6a7722ad8
--- /dev/null
+++ b/ansible.cfg.example
@@ -0,0 +1,23 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+# Uncomment to use the provided BYO inventory
+#hostfile = inventory/byo/hosts
+
+# Uncomment to use the provided GCE dynamic inventory script
+#hostfile = inventory/gce/gce.py
+
+# Uncomment to use the provided AWS dynamic inventory script
+#hostfile = inventory/aws/ec2.py
-- 
cgit v1.2.3


From d9b276629476dc2d6de3ef32717bf2035f4338c2 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Fri, 8 May 2015 15:00:31 -0400
Subject: Adding cache location for multi ec2

---
 bin/oscp                                              | 14 ++++++++------
 bin/ossh                                              | 15 ++++++++-------
 roles/openshift_ansible_inventory/tasks/main.yml      | 19 +++++++++++++++++++
 .../templates/multi_ec2.yaml.j2                       |  1 +
 4 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/bin/oscp b/bin/oscp
index 461ad0a0f..68691ec22 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -32,10 +32,10 @@ class Oscp(object):
         self.aws = awsutil.AwsUtil(self.inventory)
 
         # get a dict of host inventory
-        if self.args.list:
-            self.get_hosts()
-        else:
+        if self.args.refresh_cache:
             self.get_hosts(True)
+        else:
+            self.get_hosts()
 
         if (self.args.src == '' or self.args.dest == '') and not self.args.list:
             self.parser.print_help()
@@ -68,6 +68,8 @@ class Oscp(object):
                           action="store_true", help="debug mode")
         parser.add_argument('-v', '--verbose', default=False,
                           action="store_true", help="Verbose?")
+        parser.add_argument('--refresh-cache', default=False,
+                          action="store_true", help="Force a refresh on the host cache.")
         parser.add_argument('--list', default=False,
                           action="store_true", help="list out hosts")
         parser.add_argument('-r', '--recurse', action='store_true', default=False,
@@ -119,14 +121,14 @@ class Oscp(object):
         else:
             self.env = None
 
-    def get_hosts(self, cache_only=False):
+    def get_hosts(self, refresh_cache=False):
         '''Query our host inventory and return a dict where the format
            equals:
 
            dict['environment'] = [{'servername' : {}}, ]
         '''
-        if cache_only:
-            self.host_inventory = self.aws.build_host_dict_by_env(['--cache-only'])
+        if refresh_cache:
+            self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
         else:
             self.host_inventory = self.aws.build_host_dict_by_env()
 
diff --git a/bin/ossh b/bin/ossh
index c16ea6eda..196430e13 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -28,11 +28,10 @@ class Ossh(object):
 
         self.aws = awsutil.AwsUtil(self.inventory)
 
-        # get a dict of host inventory
-        if self.args.list:
-            self.get_hosts()
-        else:
+        if self.args.refresh_cache:
             self.get_hosts(True)
+        else:
+            self.get_hosts()
 
         # parse host and user
         self.process_host()
@@ -67,6 +66,8 @@ class Ossh(object):
                           action="store_true", help="debug mode")
         parser.add_argument('-v', '--verbose', default=False,
                           action="store_true", help="Verbose?")
+        parser.add_argument('--refresh-cache', default=False,
+                          action="store_true", help="Force a refresh on the host cache.")
         parser.add_argument('--list', default=False,
                           action="store_true", help="list out hosts")
         parser.add_argument('-c', '--command', action='store',
@@ -109,14 +110,14 @@ class Ossh(object):
             if self.args.login_name:
                 self.user = self.args.login_name
 
-    def get_hosts(self, cache_only=False):
+    def get_hosts(self, refresh_cache=False):
         '''Query our host inventory and return a dict where the format
            equals:
 
            dict['servername'] = dns_name
         '''
-        if cache_only:
-            self.host_inventory = self.aws.build_host_dict_by_env(['--cache-only'])
+        if refresh_cache:
+            self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
         else:
             self.host_inventory = self.aws.build_host_dict_by_env()
 
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index dddfe24e3..91c96d827 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -24,6 +24,14 @@
     owner: root
     group: libra_ops
 
+# This cron uses the above location to call its job
+- name: Cron to keep cache fresh
+  cron:
+    name: 'multi_ec2_inventory'
+    minute: '*/10'
+    job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+  when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
+
 - lineinfile:
     dest: /etc/ansible/ansible.cfg
     backrefs: yes
@@ -43,3 +51,14 @@
     regexp: '^destination_format_tags *='
     line: "destination_format_tags = {{ oo_ec2_destination_format_tags }}"
   when: oo_ec2_destination_format_tags is defined
+
+- name: Set cache location
+  file:
+    state: directory
+    dest: "{{ oo_inventory_cache_location | dirname }}"
+    owner: root
+    group: libra_ops
+    recurse: yes
+    mode: '2750'
+  when: oo_inventory_cache_location is defined
+
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
index 23dfe73b8..f7005ff5b 100644
--- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -1,5 +1,6 @@
 # multi ec2 inventory configs
 cache_max_age: {{ oo_inventory_cache_max_age }}
+cache_location: {{ oo_inventory_cache_location | default('~/.ansible/tmp/multi_ec2_inventory.cache') }}
 accounts:
 {% for account in oo_inventory_accounts %}
   - name: {{ account.name }}
-- 
cgit v1.2.3


From 7c905c0cf962ec9b7f5bd140a506bd614831f0e8 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Tue, 12 May 2015 12:20:11 -0400
Subject: Added capability to pass in ec2.ini file.

---
 inventory/multi_ec2.py           | 74 ++++++++++++++++++++++++++++------------
 inventory/multi_ec2.yaml.example | 10 ++++++
 2 files changed, 63 insertions(+), 21 deletions(-)

diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 063a80300..d251c6a6a 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -13,7 +13,7 @@ import subprocess
 import json
 import errno
 import fcntl
-
+import tempfile
 
 CONFIG_FILE_NAME = 'multi_ec2.yaml'
 DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
@@ -128,6 +128,54 @@ class MultiEc2(object):
 
         return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                 stdout=subprocess.PIPE, env=env)
+
+    @staticmethod
+    def generate_config(config_data):
+        """Generate the ec2.ini file as a secure temp file.
+           Once generated, pass it to the ec2.py as an environment variable.
+        """
+        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
+        for section, values in config_data.items():
+            os.write(fildes, "[%s]\n" % section)
+            for option, value  in values.items():
+                os.write(fildes, "%s = %s\n" % (option, value))
+        os.close(fildes)
+        return tmp_file_path
+
+    def run_provider(self):
+        '''Setup the provider call with proper variables
+           and call self.get_provider_tags.
+        '''
+        try:
+            all_results = []
+            tmp_file_path = None
+            processes = {}
+            for account in self.config['accounts']:
+                env = account['env_vars']
+                if account.has_key('provider_config'):
+                    tmp_file_path = MultiEc2.generate_config(account['provider_config'])
+                    env['EC2_INI_PATH'] = tmp_file_path
+                name = account['name']
+                provider = account['provider']
+                processes[name] = self.get_provider_tags(provider, env)
+
+            # for each process collect stdout when its available
+            for name, process in processes.items():
+                out, err = process.communicate()
+                all_results.append({
+                    "name": name,
+                    "out": out.strip(),
+                    "err": err.strip(),
+                    "code": process.returncode
+                })
+
+        finally:
+            # Clean up the mkstemp file
+            if tmp_file_path:
+                os.unlink(tmp_file_path)
+
+        return all_results
+
     def get_inventory(self):
         """Create the subprocess to fetch tags from a provider.
         Host query:
@@ -138,28 +186,12 @@ class MultiEc2(object):
         Query all of the different accounts for their tags.  Once completed
         store all of their results into one merged updated hash.
         """
-        processes = {}
-        for account in self.config['accounts']:
-            env = account['env_vars']
-            name = account['name']
-            provider = account['provider']
-            processes[name] = self.get_provider_tags(provider, env)
-
-        # for each process collect stdout when its available
-        all_results = []
-        for name, process in processes.items():
-            out, err = process.communicate()
-            all_results.append({
-                "name": name,
-                "out": out.strip(),
-                "err": err.strip(),
-                "code": process.returncode
-            })
+        provider_results = self.run_provider()
 
         # process --host results
         if not self.args.host:
             # For any non-zero, raise an error on it
-            for result in all_results:
+            for result in provider_results:
                 if result['code'] != 0:
                     raise RuntimeError(result['err'])
                 else:
@@ -171,9 +203,9 @@ class MultiEc2(object):
         else:
             # For any 0 result, return it
             count = 0
-            for results in all_results:
+            for results in provider_results:
                 if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
-                    self.result = json.loads(out)
+                    self.result = json.loads(results['out'])
                     count += 1
                 if count > 1:
                     raise RuntimeError("Found > 1 results for --host %s. \
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index d8361a49f..c41c134ab 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -5,6 +5,15 @@ cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
 accounts:
   - name: aws1
     provider: aws/hosts/ec2.py
+    provider_config:
+      ec2:
+        regions: all
+        regions_exclude:  us-gov-west-1,cn-north-1
+        destination_variable: public_dns_name
+        route53: False
+        cache_path: ~/.ansible/tmp
+        cache_max_age: 300
+        vpc_destination_variable: ip_address
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -14,5 +23,6 @@ accounts:
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      EC2_INI_PATH: /etc/ansible/ec2.ini
 
 cache_max_age: 60
-- 
cgit v1.2.3


From 4e3972b89a0954b55e7b10917757e07b9e610c3a Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Tue, 12 May 2015 16:37:49 -0400
Subject: removed ec2.ini from the openshift-ansible-inventory.spec file so
 that we're not dictating what the ec2.ini file should look like.

---
 inventory/openshift-ansible-inventory.spec | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index e6bf37988..c1b066afb 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -25,7 +25,7 @@ mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
 
 cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
 cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/hosts/ec2.py aws/hosts/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
+cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
 cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 
 %files
@@ -33,7 +33,6 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 %dir /usr/share/ansible/inventory
 /usr/share/ansible/inventory/multi_ec2.py*
 /usr/share/ansible/inventory/aws/ec2.py*
-%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
-- 
cgit v1.2.3


From 22a2616359ee1f167c85ec21bf416350706a7b5b Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Tue, 12 May 2015 16:47:07 -0400
Subject: Automatic commit of package [openshift-ansible-inventory] release
 [0.0.5-1].

---
 inventory/openshift-ansible-inventory.spec   | 7 ++++++-
 rel-eng/packages/openshift-ansible-inventory | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index c1b066afb..69bd255d4 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.4
+Version:       0.0.5
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,11 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
+  not dictating what the ec2.ini file should look like. (twiest@redhat.com)
+- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
+
 * Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
 - Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
 
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index 9a0a5af4e..ec3272038 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.4-1 inventory/
+0.0.5-1 inventory/
-- 
cgit v1.2.3


From 968ba15b98263060ed2c3030f27039ba950e7425 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Wed, 13 May 2015 12:09:22 -0400
Subject: Added options for separate ec2.ini files

---
 roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
index f7005ff5b..6e794c194 100644
--- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -5,6 +5,13 @@ accounts:
 {% for account in oo_inventory_accounts %}
   - name: {{ account.name }}
     provider: {{ account.provider }}
+    provider_config:
+{%  for section, items in account.provider_config.items() %}
+      {{ section }}:
+{%    for property, value in items.items() %}
+        {{ property }}: {{ value }}
+{%    endfor %}
+{% endfor %}
     env_vars:
       AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
-- 
cgit v1.2.3


From d82c71ce9a98c1e9ecabf24cd7bd7c7e19aabec2 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Wed, 13 May 2015 16:38:20 -0400
Subject: Added support for grouping and a bug fix.

---
 inventory/multi_ec2.py           | 64 ++++++++++++++++++++++++++++++----------
 inventory/multi_ec2.yaml.example |  6 +++-
 2 files changed, 53 insertions(+), 17 deletions(-)

diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index d251c6a6a..11247f942 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -14,6 +14,7 @@ import json
 import errno
 import fcntl
 import tempfile
+import copy
 
 CONFIG_FILE_NAME = 'multi_ec2.yaml'
 DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
@@ -148,13 +149,13 @@ class MultiEc2(object):
         '''
         try:
             all_results = []
-            tmp_file_path = None
+            tmp_file_paths = []
             processes = {}
             for account in self.config['accounts']:
                 env = account['env_vars']
                 if account.has_key('provider_config'):
-                    tmp_file_path = MultiEc2.generate_config(account['provider_config'])
-                    env['EC2_INI_PATH'] = tmp_file_path
+                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
+                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
                 name = account['name']
                 provider = account['provider']
                 processes[name] = self.get_provider_tags(provider, env)
@@ -171,8 +172,8 @@ class MultiEc2(object):
 
         finally:
             # Clean up the mkstemp file
-            if tmp_file_path:
-                os.unlink(tmp_file_path)
+            for tmp_file in tmp_file_paths:
+                os.unlink(tmp_file)
 
         return all_results
 
@@ -189,27 +190,58 @@ class MultiEc2(object):
         provider_results = self.run_provider()
 
         # process --host results
-        if not self.args.host:
+        # For any 0 result, return it
+        if self.args.host:
+            count = 0
+            for results in provider_results:
+                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
+                    self.result = json.loads(results['out'])
+                    count += 1
+                if count > 1:
+                    raise RuntimeError("Found > 1 results for --host %s. \
+                                       This is an invalid state." % self.args.host)
+        # process --list results
+        else:
             # For any non-zero, raise an error on it
             for result in provider_results:
                 if result['code'] != 0:
                     raise RuntimeError(result['err'])
                 else:
                     self.all_ec2_results[result['name']] = json.loads(result['out'])
+
+            # Check if user wants extra vars in yaml by
+            # having hostvars and all_group defined
+            for acc_config in self.config['accounts']:
+                self.apply_account_config(acc_config)
+
+            # Build results by merging all dictionaries
             values = self.all_ec2_results.values()
             values.insert(0, self.result)
             for result in  values:
                 MultiEc2.merge_destructively(self.result, result)
-        else:
-            # For any 0 result, return it
-            count = 0
-            for results in provider_results:
-                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
-                    self.result = json.loads(results['out'])
-                    count += 1
-                if count > 1:
-                    raise RuntimeError("Found > 1 results for --host %s. \
-                                       This is an invalid state." % self.args.host)
+
+    def apply_account_config(self, acc_config):
+        ''' Apply account config settings
+        '''
+        if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
+            return
+
+        results = self.all_ec2_results[acc_config['name']]
+        # Update each hostvar with the newly desired key: value
+        for host_property, value in acc_config['hostvars'].items():
+            # Verify the account results look sane
+            # by checking for these keys ('_meta' and 'hostvars' exist)
+            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+                for data in results['_meta']['hostvars'].values():
+                    data[str(host_property)] = str(value)
+
+            # Add this group
+            results["%s_%s" % (host_property, value)] = \
+              copy.copy(results[acc_config['all_group']])
+
+        # store the results back into all_ec2_results
+        self.all_ec2_results[acc_config['name']] = results
+
     @staticmethod
     def merge_destructively(input_a, input_b):
         "merges b into input_a"
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index c41c134ab..99f157b11 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -17,8 +17,12 @@ accounts:
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+    all_group: ec2
+    hostvars:
+      cloud: aws
+      account: aws1
 
-  - name: aws2
+  - name: aws2
     provider: aws/hosts/ec2.py
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-- 
cgit v1.2.3


From 87e53ef9917556a331a81d8a01195c4ac8679bcd Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Wed, 13 May 2015 16:57:57 -0400
Subject: Automatic commit of package [openshift-ansible-inventory] release
 [0.0.6-1].

---
 inventory/openshift-ansible-inventory.spec   | 5 ++++-
 rel-eng/packages/openshift-ansible-inventory | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index 69bd255d4..0fe25ff31 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.5
+Version:       0.0.6
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,9 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- Added support for grouping and a bug fix. (kwoodson@redhat.com)
+
 * Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
 - removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
   not dictating what the ec2.ini file should look like. (twiest@redhat.com)
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index ec3272038..f4d326d08 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.5-1 inventory/
+0.0.6-1 inventory/
-- 
cgit v1.2.3


From 2657d56660495476d4e64bf4b1a47ebf277770ee Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Thu, 14 May 2015 12:14:27 -0400
Subject: Making multi_ec2 into a library

---
 inventory/multi_ec2.py | 39 +++++++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 11247f942..f8196aefd 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -26,8 +26,14 @@ class MultiEc2(object):
             Stores a json hash of resources in result.
     '''
 
-    def __init__(self):
-        self.args = None
+    def __init__(self, args=None):
+        # Allow args to be passed when called as a library
+        if not args:
+            self.args = {}
+        else:
+            self.args = args
+
+        self.cache_path = DEFAULT_CACHE_PATH
         self.config = None
         self.all_ec2_results = {}
         self.result = {}
@@ -44,8 +50,15 @@ class MultiEc2(object):
         else:
             self.config_file = None # expect env vars
 
-        self.parse_cli_args()
 
+    def run(self):
+        '''This method checks to see if the local
+           cache is valid for the inventory.
+
+           if the cache is valid; return cache
+           else the credentials are loaded from multi_ec2.yaml or from the env
+           and we attempt to get the inventory from the provider specified.
+        '''
         # load yaml
         if self.config_file and os.path.isfile(self.config_file):
             self.config = self.load_yaml_config()
@@ -70,15 +83,14 @@ class MultiEc2(object):
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
         # Set the default cache path but if its defined we'll assign it.
-        self.cache_path = DEFAULT_CACHE_PATH
         if self.config.has_key('cache_location'):
             self.cache_path = self.config['cache_location']
 
-        if self.args.refresh_cache:
+        if self.args.get('refresh_cache', None):
             self.get_inventory()
             self.write_to_cache()
         # if its a host query, fetch and do not cache
-        elif self.args.host:
+        elif self.args.get('host', None):
             self.get_inventory()
         elif not self.is_cache_valid():
             # go fetch the inventories and cache them if cache is expired
@@ -119,9 +131,9 @@ class MultiEc2(object):
                         "and that it is executable. (%s)" % provider)
 
         cmds = [provider]
-        if self.args.host:
+        if self.args.get('host', None):
             cmds.append("--host")
-            cmds.append(self.args.host)
+            cmds.append(self.args.get('host', None))
         else:
             cmds.append('--list')
 
@@ -191,7 +203,7 @@ class MultiEc2(object):
 
         # process --host results
         # For any 0 result, return it
-        if self.args.host:
+        if self.args.get('host', None):
             count = 0
             for results in provider_results:
                 if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
@@ -199,7 +211,7 @@ class MultiEc2(object):
                     count += 1
                 if count > 1:
                     raise RuntimeError("Found > 1 results for --host %s. \
-                                       This is an invalid state." % self.args.host)
+                                       This is an invalid state." % self.args.get('host', None))
         # process --list results
         else:
             # For any non-zero, raise an error on it
@@ -290,7 +302,7 @@ class MultiEc2(object):
                             help='List instances (default: True)')
         parser.add_argument('--host', action='store', default=False,
                             help='Get all the variables about a specific instance')
-        self.args = parser.parse_args()
+        self.args = parser.parse_args().__dict__
 
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
@@ -340,4 +352,7 @@ class MultiEc2(object):
 
 
 if __name__ == "__main__":
-    print MultiEc2().result_str()
+    MEC2 = MultiEc2()
+    MEC2.parse_cli_args()
+    MEC2.run()
+    print MEC2.result_str()
-- 
cgit v1.2.3


From d9c0bfdbbebc8ca508ddc3f7cee4a41af666f464 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Thu, 14 May 2015 13:49:27 -0400
Subject: Added ability to create groupby options on hostvars

---
 roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
index 6e794c194..8228ab915 100644
--- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -15,5 +15,12 @@ accounts:
     env_vars:
       AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
+{% if account.all_group is defined and account.hostvars is defined %}
+    all_group: {{ account.all_group }}
+    hostvars:
+{%    for property, value in account.hostvars.items() %}
+      {{ property }}: {{ value }}
+{%    endfor %}
+{% endif %}
 
 {% endfor %}
-- 
cgit v1.2.3


From 88c7ed4ad437f6705d91e4c1ffb2e88c71fb7db4 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Thu, 14 May 2015 16:41:10 -0400
Subject: Command line tools import multi_ec2 as lib

---
 bin/ohi                            |  9 +--------
 bin/openshift_ansible/awsutil.py   | 40 +++++++-------------------------------
 bin/openshift_ansible/multi_ec2.py |  1 +
 bin/oscp                           |  8 +-------
 bin/ossh                           |  8 +-------
 5 files changed, 11 insertions(+), 55 deletions(-)
 create mode 120000 bin/openshift_ansible/multi_ec2.py

diff --git a/bin/ohi b/bin/ohi
index 24a027be2..6f162ac13 100755
--- a/bin/ohi
+++ b/bin/ohi
@@ -17,13 +17,10 @@ from openshift_ansible.awsutil import ArgumentError
 
 CONFIG_MAIN_SECTION = 'main'
 CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-CONFIG_INVENTORY_OPTION = 'inventory'
-
 
 
 class Ohi(object):
     def __init__(self):
-        self.inventory = None
         self.host_type_aliases = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
@@ -35,7 +32,7 @@ class Ohi(object):
         self.parse_cli_args()
         self.parse_config_file()
 
-        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+        self.aws = awsutil.AwsUtil(self.host_type_aliases)
 
     def run(self):
         if self.args.list_host_types:
@@ -76,10 +73,6 @@ class Ohi(object):
             config = ConfigParser.ConfigParser()
             config.read(self.config_path)
 
-            if config.has_section(CONFIG_MAIN_SECTION) and \
-               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
             self.host_type_aliases = {}
             if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
                 for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 8b365faa9..9df034f57 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -2,10 +2,9 @@
 
 """This module comprises Aws specific utility functions."""
 
-import subprocess
 import os
-import json
 import re
+from openshift_ansible import multi_ec2
 
 class ArgumentError(Exception):
     """This class is raised when improper arguments are passed."""
@@ -22,11 +21,10 @@ class ArgumentError(Exception):
 class AwsUtil(object):
     """This class contains the AWS utility functions."""
 
-    def __init__(self, inventory_path=None, host_type_aliases=None):
+    def __init__(self, host_type_aliases=None):
         """Initialize the AWS utility class.
 
         Keyword arguments:
-        inventory_path    -- the path to find the inventory script
         host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
         """
 
@@ -35,15 +33,6 @@ class AwsUtil(object):
         self.host_type_aliases = host_type_aliases
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
-        if inventory_path is None:
-            inventory_path = os.path.realpath(os.path.join(self.file_path, \
-                                              '..', '..', 'inventory', \
-                                              'multi_ec2.py'))
-
-        if not os.path.isfile(inventory_path):
-            raise Exception("Inventory file not found [%s]" % inventory_path)
-
-        self.inventory_path = inventory_path
         self.setup_host_type_alias_lookup()
 
     def setup_host_type_alias_lookup(self):
@@ -53,31 +42,16 @@ class AwsUtil(object):
             for value in values:
                 self.alias_lookup[value] = key
 
-
-
-    def get_inventory(self, args=None):
+    @staticmethod
+    def get_inventory(args=None):
         """Calls the inventory script and returns a dictionary containing the inventory."
 
         Keyword arguments:
         args -- optional arguments to pass to the inventory script
         """
-        args = args or []
-        cmd = [self.inventory_path]
-
-        if args:
-            cmd.extend(args)
-
-        env = os.environ
-
-        proc = subprocess.Popen(cmd, stderr=subprocess.PIPE,
-                                stdout=subprocess.PIPE, env=env)
-
-        out, err = proc.communicate()
-
-        if proc.returncode != 0:
-            raise RuntimeError(err)
-
-        return json.loads(out.strip())
+        mec2 = multi_ec2.MultiEc2(args)
+        mec2.run()
+        return mec2.result
 
     def get_environments(self):
         """Searches for env tags in the inventory and returns all of the envs found."""
diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py
new file mode 120000
index 000000000..660a0418e
--- /dev/null
+++ b/bin/openshift_ansible/multi_ec2.py
@@ -0,0 +1 @@
+../../inventory/multi_ec2.py
\ No newline at end of file
diff --git a/bin/oscp b/bin/oscp
index 68691ec22..f6dd2ad88 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -11,11 +11,9 @@ import ConfigParser
 from openshift_ansible import awsutil
 
 CONFIG_MAIN_SECTION = 'main'
-CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Oscp(object):
     def __init__(self):
-        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         # Default the config path to /etc
@@ -29,7 +27,7 @@ class Oscp(object):
         # parse host and user
         self.process_host()
 
-        self.aws = awsutil.AwsUtil(self.inventory)
+        self.aws = awsutil.AwsUtil()
 
         # get a dict of host inventory
         if self.args.refresh_cache:
@@ -56,10 +54,6 @@ class Oscp(object):
             config = ConfigParser.ConfigParser()
             config.read(self.config_path)
 
-            if config.has_section(CONFIG_MAIN_SECTION) and \
-               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser.add_argument('-e', '--env',
diff --git a/bin/ossh b/bin/ossh
index 196430e13..855c5d8b4 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -11,11 +11,9 @@ import ConfigParser
 from openshift_ansible import awsutil
 
 CONFIG_MAIN_SECTION = 'main'
-CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Ossh(object):
     def __init__(self):
-        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         # Default the config path to /etc
@@ -26,7 +24,7 @@ class Ossh(object):
         self.parse_cli_args()
         self.parse_config_file()
 
-        self.aws = awsutil.AwsUtil(self.inventory)
+        self.aws = awsutil.AwsUtil()
 
         if self.args.refresh_cache:
             self.get_hosts(True)
@@ -54,10 +52,6 @@ class Ossh(object):
             config = ConfigParser.ConfigParser()
             config.read(self.config_path)
 
-            if config.has_section(CONFIG_MAIN_SECTION) and \
-               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser.add_argument('-e', '--env', action="store",
-- 
cgit v1.2.3


From 701edcaeff9dc1a211a243c792eca3773618be33 Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Fri, 15 May 2015 12:36:49 -0400
Subject: Fixed openshift-ansible-bin rpm build

---
 bin/openshift-ansible-bin.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index 9fc79fe6c..c7db74187 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -24,7 +24,7 @@ mkdir -p %{buildroot}/etc/bash_completion.d
 mkdir -p %{buildroot}/etc/openshift_ansible
 
 cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
 cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
 
 cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
-- 
cgit v1.2.3


From b1eab51d5fbc9e8183d89464dfc2e64db160e0c1 Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Fri, 15 May 2015 13:10:48 -0400
Subject: Automatic commit of package [openshift-ansible-bin] release
 [0.0.14-1].

---
 bin/openshift-ansible-bin.spec         | 5 ++++-
 rel-eng/packages/openshift-ansible-bin | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index c7db74187..d6691ea32 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.13
+Version:       0.0.14
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,9 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
+- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
+- Adding cache location for multi ec2 (kwoodson@redhat.com)
 * Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
 - added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
 
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index 9d95e8cdd..65c60b560 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.13-1 bin/
+0.0.14-1 bin/
-- 
cgit v1.2.3


From 0ed9652cf6bfc62e36eb63265fc67a7cb79f457a Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Fri, 15 May 2015 13:55:25 -0400
Subject: Automatic commit of package [openshift-ansible-bin] release
 [0.0.15-1].

---
 bin/openshift-ansible-bin.spec         | 5 ++++-
 rel-eng/packages/openshift-ansible-bin | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index d6691ea32..f2a191ab0 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.14
+Version:       0.0.15
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,9 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.15-1
+- 
+
 * Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
 - Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
 - Adding cache location for multi ec2 (kwoodson@redhat.com)
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index 65c60b560..72a591e8c 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.14-1 bin/
+0.0.15-1 bin/
-- 
cgit v1.2.3


From 0860590391712510d69b26c5d711c962674525e0 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Fri, 15 May 2015 14:14:35 -0400
Subject: Automatic commit of package [openshift-ansible-bin] release
 [0.0.16-1].

---
 bin/openshift-ansible-bin.spec         | 5 ++++-
 rel-eng/packages/openshift-ansible-bin | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index f2a191ab0..87c0079f2 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.15
+Version:       0.0.16
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,9 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.16-1
+- 
+
 * Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.15-1
 - 
 
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index 72a591e8c..ea6e9777e 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.15-1 bin/
+0.0.16-1 bin/
-- 
cgit v1.2.3


From 1eda40ef2a4df9c5a6a728ac32d74ee1aaca2676 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Fri, 15 May 2015 14:15:08 -0400
Subject: Automatic commit of package [openshift-ansible-inventory] release
 [0.0.7-1].

---
 inventory/openshift-ansible-inventory.spec   | 5 ++++-
 rel-eng/packages/openshift-ansible-inventory | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index 0fe25ff31..7ba6e3df1 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.6
+Version:       0.0.7
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,9 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
+- Making multi_ec2 into a library (kwoodson@redhat.com)
+
 * Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
 - Added support for grouping and a bug fix. (kwoodson@redhat.com)
 
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index f4d326d08..df529d9fd 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.6-1 inventory/
+0.0.7-1 inventory/
-- 
cgit v1.2.3


From 890f2c5d039a501966eedbf8c7bb7b7e9b50464a Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Fri, 15 May 2015 14:48:55 -0400
Subject: fixed the openshift-ansible-bin build

---
 bin/openshift-ansible-bin.spec             | 12 ++++++------
 inventory/openshift-ansible-inventory.spec |  1 +
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index 87c0079f2..9336681d1 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -25,6 +25,12 @@ mkdir -p %{buildroot}/etc/openshift_ansible
 
 cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
 cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+
+# Make it so we can load multi_ec2.py as a library.
+rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
+ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
+
 cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
 
 cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
@@ -36,12 +42,6 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.16-1
-- 
-
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.15-1
-- 
-
 * Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
 - Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
 - Adding cache location for multi ec2 (kwoodson@redhat.com)
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index 7ba6e3df1..cd2332549 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -57,6 +57,7 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 - Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
 - Adding refresh-cache option and cleanup for pylint. Also updated for
   aws/hosts/ being added. (kwoodson@redhat.com)
+
 * Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
 - added the ability to have a config file in /etc/openshift_ansible to
   multi_ec2.py. (twiest@redhat.com)
-- 
cgit v1.2.3


From 09b5c45e90fbb321b7cf92c7a431b7aa4d4a803c Mon Sep 17 00:00:00 2001
From: Thomas Wiest <twiest@redhat.com>
Date: Fri, 15 May 2015 15:20:01 -0400
Subject: Automatic commit of package [openshift-ansible-bin] release
 [0.0.17-1].

---
 bin/openshift-ansible-bin.spec         | 5 ++++-
 rel-eng/packages/openshift-ansible-bin | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index 9336681d1..884d4eb0a 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.16
+Version:       0.0.17
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -42,6 +42,9 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
+- fixed the openshift-ansible-bin build (twiest@redhat.com)
+
 * Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
 - Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
 - Adding cache location for multi ec2 (kwoodson@redhat.com)
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index ea6e9777e..de9bb5157 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.16-1 bin/
+0.0.17-1 bin/
-- 
cgit v1.2.3


From 8ba0149d5eb990901f929437a5ee3b8ca8089bff Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Mon, 18 May 2015 15:05:04 -0400
Subject: Updating ansible config through a separate config.yml

---
 roles/ansible/tasks/config.yml                   |  8 ++++++++
 roles/ansible/tasks/main.yaml                    |  7 -------
 roles/ansible/tasks/main.yml                     | 11 +++++++++++
 roles/openshift_ansible_inventory/tasks/main.yml |  7 -------
 4 files changed, 19 insertions(+), 14 deletions(-)
 create mode 100644 roles/ansible/tasks/config.yml
 delete mode 100644 roles/ansible/tasks/main.yaml
 create mode 100644 roles/ansible/tasks/main.yml

diff --git a/roles/ansible/tasks/config.yml b/roles/ansible/tasks/config.yml
new file mode 100644
index 000000000..5e361429b
--- /dev/null
+++ b/roles/ansible/tasks/config.yml
@@ -0,0 +1,8 @@
+---
+- name: modify ansible.cfg
+  lineinfile:
+    dest: /etc/ansible/ansible.cfg
+    backrefs: yes
+    regexp: "^#?({{ item.option }})( *)="
+    line: '\1\2= {{ item.value }}'
+  with_items: cfg_options
diff --git a/roles/ansible/tasks/main.yaml b/roles/ansible/tasks/main.yaml
deleted file mode 100644
index 67a04b919..000000000
--- a/roles/ansible/tasks/main.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# Install ansible client
-
-- name: Install Ansible
-  yum:
-    pkg: ansible
-    state: installed
diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml
new file mode 100644
index 000000000..5d20a3b35
--- /dev/null
+++ b/roles/ansible/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+# Install ansible client
+
+- name: Install Ansible
+  yum:
+    pkg: ansible
+    state: installed
+
+- include: config.yml
+  vars:
+    cfg_options: "{{ ans_config }}"
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 91c96d827..5e517cb46 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -32,12 +32,6 @@
     job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
   when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
 
-- lineinfile:
-    dest: /etc/ansible/ansible.cfg
-    backrefs: yes
-    regexp: '^(hostfile|inventory)( *)='
-    line: '\1\2= /etc/ansible/inventory'
-
 - name: setting ec2.ini destination_format
   lineinfile:
     dest: /usr/share/ansible/inventory/aws/ec2.ini
@@ -61,4 +55,3 @@
     recurse: yes
     mode: '2750'
   when: oo_inventory_cache_location is defined
-
-- 
cgit v1.2.3


From 3b82232fab00f4ac2fe3490058d2cab851773e50 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Tue, 19 May 2015 09:27:30 -0400
Subject: Removing destination format

---
 roles/openshift_ansible_inventory/tasks/main.yml | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 5e517cb46..5fe77e38b 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -32,20 +32,6 @@
     job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
   when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
 
-- name: setting ec2.ini destination_format
-  lineinfile:
-    dest: /usr/share/ansible/inventory/aws/ec2.ini
-    regexp: '^destination_format *='
-    line: "destination_format = {{ oo_ec2_destination_format }}"
-  when: oo_ec2_destination_format is defined
-
-- name: setting ec2.ini destination_format_tags
-  lineinfile:
-    dest: /usr/share/ansible/inventory/aws/ec2.ini
-    regexp: '^destination_format_tags *='
-    line: "destination_format_tags = {{ oo_ec2_destination_format_tags }}"
-  when: oo_ec2_destination_format_tags is defined
-
 - name: Set cache location
   file:
     state: directory
-- 
cgit v1.2.3


From 05386153cc68c10c1af78709b576ab35b93dacd3 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Wed, 20 May 2015 14:00:16 -0400
Subject: Guard against missing aws metadata for hostnames/ips

---
 roles/openshift_facts/library/openshift_facts.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ec27b5697..9c2657ff2 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -200,7 +200,7 @@ def normalize_aws_facts(metadata, facts):
         int_info = dict()
         var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
         for ips_var, int_var in var_map.iteritems():
-            ips = interface[int_var]
+            ips = interface.get(int_var)
             if isinstance(ips, basestring):
                 int_info[ips_var] = [ips]
             else:
@@ -220,14 +220,14 @@ def normalize_aws_facts(metadata, facts):
     # TODO: actually attempt to determine default local and public ips
     # by using the ansible default ip fact and the ipv4-associations
     # from the ec2 metadata
-    facts['network']['ip'] = metadata['local-ipv4']
-    facts['network']['public_ip'] = metadata['public-ipv4']
+    facts['network']['ip'] = metadata.get('local-ipv4')
+    facts['network']['public_ip'] = metadata.get('public-ipv4')
 
     # TODO: verify that local hostname makes sense and is resolvable
-    facts['network']['hostname'] = metadata['local-hostname']
+    facts['network']['hostname'] = metadata.get('local-hostname')
 
     # TODO: verify that public hostname makes sense and is resolvable
-    facts['network']['public_hostname'] = metadata['public-hostname']
+    facts['network']['public_hostname'] = metadata.get('public-hostname')
 
     return facts
 
-- 
cgit v1.2.3


From d04ddc76227db51fda8b1850a09a9c3cfd9125db Mon Sep 17 00:00:00 2001
From: Wesley Hearn <whearn@redhat.com>
Date: Wed, 20 May 2015 15:45:09 -0400
Subject: oo_filters.py: oo_set_node_label

---
 filter_plugins/oo_filters.py | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 097038450..8feb53f43 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -176,6 +176,38 @@ def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
         return [root_vol, docker_vol]
     return [root_vol]
 
+def oo_set_node_label(arg, key, value, attr_key=None, attr_value=None):
+    ''' This cycles through openshift node definitions
+        (from "osc get nodes -o json"), and adds a label.
+
+        If attr_key and attr_value are set, this will only set the label on
+        nodes where the attribute matches the specified value.
+
+        Ex:
+        - shell: osc get nodes -o json
+          register: output
+
+        - set_fact:
+          node_facts: "{{ output.stdout
+                             | from_json
+                             | oo_set_node_label('region', 'infra',
+                                            'metadata.name', '172.16.17.43') }}"
+    '''
+
+    for item in arg['items']:
+        if attr_key and attr_value:
+            actual_attr_value = get_attr(item, attr_key)
+
+            if str(attr_value) != str(actual_attr_value):
+                continue # We only want to set the values on hosts with defined attributes
+
+        if 'labels' not in item['metadata']:
+            item['metadata']['labels'] = {}
+
+        item['metadata']['labels'][key] = value
+
+    return arg
+
 # disabling pylint checks for too-few-public-methods and no-self-use since we
 # need to expose a FilterModule object that has a filters method that returns
 # a mapping of filter names to methods.
@@ -192,5 +224,6 @@ class FilterModule(object):
             "oo_pdb": oo_pdb,
             "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
             "oo_ami_selector": oo_ami_selector,
-            "oo_ec2_volume_definition": oo_ec2_volume_definition
+            "oo_ec2_volume_definition": oo_ec2_volume_definition,
+            "oo_set_node_label": oo_set_node_label
         }
-- 
cgit v1.2.3


From 294f2254b818c466ecf7ab71426a08b6dc99f89c Mon Sep 17 00:00:00 2001
From: Wesley Hearn <whearn@redhat.com>
Date: Wed, 20 May 2015 15:45:23 -0400
Subject: Initial add of openshift_master_post

---
 roles/openshift_master_post/README.md         |  39 ++++++++
 roles/openshift_master_post/defaults/main.yml |   2 +
 roles/openshift_master_post/handlers/main.yml |   2 +
 roles/openshift_master_post/meta/main.yml     | 130 ++++++++++++++++++++++++++
 roles/openshift_master_post/tasks/main.yml    |  27 ++++++
 roles/openshift_master_post/vars/main.yml     |   5 +
 6 files changed, 205 insertions(+)
 create mode 100644 roles/openshift_master_post/README.md
 create mode 100644 roles/openshift_master_post/defaults/main.yml
 create mode 100644 roles/openshift_master_post/handlers/main.yml
 create mode 100644 roles/openshift_master_post/meta/main.yml
 create mode 100644 roles/openshift_master_post/tasks/main.yml
 create mode 100644 roles/openshift_master_post/vars/main.yml

diff --git a/roles/openshift_master_post/README.md b/roles/openshift_master_post/README.md
new file mode 100644
index 000000000..0aa08df5e
--- /dev/null
+++ b/roles/openshift_master_post/README.md
@@ -0,0 +1,39 @@
+OpenShift Master Post
+=========
+
+Post installation steps for setting up the cluster
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+| Name                        | Default value     | Description                                                                |
+|-----------------------------|-------------------|----------------------------------------------------------------------------|
+| omp_infra_node_filter_key   | "status.capacity" | Key from `osc get nodes -o json` to designate which node is the infra node |
+| omp_infra_node_filter_value | "7232144Ki"       | Value of omp_infra_node_filter_key to filter on |
+| omp_infra_node_label        | "infra"           | The label to apply to the infra node |
+| omp_node_region             | "us-east"         | Region that the non-infra nodes are in |
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Wesley Hearn (whearn@redhat.com)
diff --git a/roles/openshift_master_post/defaults/main.yml b/roles/openshift_master_post/defaults/main.yml
new file mode 100644
index 000000000..4f2e60144
--- /dev/null
+++ b/roles/openshift_master_post/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for openshift_master_post
diff --git a/roles/openshift_master_post/handlers/main.yml b/roles/openshift_master_post/handlers/main.yml
new file mode 100644
index 000000000..e223d1ad0
--- /dev/null
+++ b/roles/openshift_master_post/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for openshift_master_post
diff --git a/roles/openshift_master_post/meta/main.yml b/roles/openshift_master_post/meta/main.yml
new file mode 100644
index 000000000..01cf61397
--- /dev/null
+++ b/roles/openshift_master_post/meta/main.yml
@@ -0,0 +1,130 @@
+---
+galaxy_info:
+  author: your name
+  description: 
+  company: your company (optional)
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+  min_ansible_version: 1.2
+  #
+  # Below are all platforms currently available. Just uncomment
+  # the ones that apply to your role. If you don't see your 
+  # platform on this list, let us know and we'll get it added!
+  #
+  #platforms:
+  #- name: EL
+  #  versions:
+  #  - all
+  #  - 5
+  #  - 6
+  #  - 7
+  #- name: GenericUNIX
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Fedora
+  #  versions:
+  #  - all
+  #  - 16
+  #  - 17
+  #  - 18
+  #  - 19
+  #  - 20
+  #- name: SmartOS
+  #  versions:
+  #  - all
+  #  - any
+  #- name: opensuse
+  #  versions:
+  #  - all
+  #  - 12.1
+  #  - 12.2
+  #  - 12.3
+  #  - 13.1
+  #  - 13.2
+  #- name: Amazon
+  #  versions:
+  #  - all
+  #  - 2013.03
+  #  - 2013.09
+  #- name: GenericBSD
+  #  versions:
+  #  - all
+  #  - any
+  #- name: FreeBSD
+  #  versions:
+  #  - all
+  #  - 8.0
+  #  - 8.1
+  #  - 8.2
+  #  - 8.3
+  #  - 8.4
+  #  - 9.0
+  #  - 9.1
+  #  - 9.1
+  #  - 9.2
+  #- name: Ubuntu
+  #  versions:
+  #  - all
+  #  - lucid
+  #  - maverick
+  #  - natty
+  #  - oneiric
+  #  - precise
+  #  - quantal
+  #  - raring
+  #  - saucy
+  #  - trusty
+  #- name: SLES
+  #  versions:
+  #  - all
+  #  - 10SP3
+  #  - 10SP4
+  #  - 11
+  #  - 11SP1
+  #  - 11SP2
+  #  - 11SP3
+  #- name: GenericLinux
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Debian
+  #  versions:
+  #  - all
+  #  - etch
+  #  - lenny
+  #  - squeeze
+  #  - wheezy
+  #
+  # Below are all categories currently available. Just as with
+  # the platforms above, uncomment those that apply to your role.
+  #
+  #categories:
+  #- cloud
+  #- cloud:ec2
+  #- cloud:gce
+  #- cloud:rax
+  #- clustering
+  #- database
+  #- database:nosql
+  #- database:sql
+  #- development
+  #- monitoring
+  #- networking
+  #- packaging
+  #- system
+  #- web
+dependencies: []
+  # List your role dependencies here, one per line.
+  # Be sure to remove the '[]' above if you add dependencies
+  # to this list.
+  
diff --git a/roles/openshift_master_post/tasks/main.yml b/roles/openshift_master_post/tasks/main.yml
new file mode 100644
index 000000000..391c4a293
--- /dev/null
+++ b/roles/openshift_master_post/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Get Nodes Config
+  shell: osc get nodes -o json | sed -e '/"resourceVersion"/d'
+  register: output
+
+- name: Set node regions
+  set_fact:
+    node_facts: "{{ output.stdout | from_json
+                  | oo_set_node_label('region', omp_node_region) }}"
+
+- name: Set infra node labels
+  set_fact:
+    node_final_facts: "{{ output.stdout | from_json
+                  | oo_set_node_label('region', omp_infra_node_label,
+                  omp_infra_node_filter_key, omp_infra_node_filter_value) }}"
+
+- name: Write node config to temp file
+  copy:
+    content: "{{ node_final_facts }}"
+    dest: /tmp/nodes.json
+
+- name: Import new node config
+  shell: osc update node -f /tmp/nodes.json
+
+- name: Remove node temp file
+  file:
+    path: /tmp/nodes.json
+    state: absent
diff --git a/roles/openshift_master_post/vars/main.yml b/roles/openshift_master_post/vars/main.yml
new file mode 100644
index 000000000..1ebc12aa1
--- /dev/null
+++ b/roles/openshift_master_post/vars/main.yml
@@ -0,0 +1,5 @@
+---
+omp_infra_node_filter_key: "status.capacity"
+omp_infra_node_filter_value: "7232144Ki"
+omp_infra_node_label: "infra"
+omp_node_region: "us-east"
-- 
cgit v1.2.3


From f870624c57685494308deceb8e53e4a69586b757 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Thu, 14 May 2015 09:22:13 -0400
Subject: BYO playbook fix

- Fix bug where playbooks/byo/config.yml would error if only a master is
  defined in the inventory.
---
 playbooks/byo/config.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
index dce49d32f..e059514db 100644
--- a/playbooks/byo/config.yml
+++ b/playbooks/byo/config.yml
@@ -1,6 +1,8 @@
 ---
 - name: Run the openshift-master config playbook
   include: openshift-master/config.yml
+  when: groups.masters is defined and groups.masters
 
 - name: Run the openshift-node config playbook
   include: openshift-node/config.yml
+  when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters
-- 
cgit v1.2.3


From f7bbf6599623eefcecef89c10ee3f6fcbb97c3f7 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Thu, 14 May 2015 12:41:48 -0400
Subject: Verify ansible is greater than 1.8.0 and not 1.9.0

---
 roles/openshift_facts/tasks/main.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 5a7d10d25..d71e6d019 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,3 +1,9 @@
 ---
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
+  assert:
+    that:
+    - ansible_version | version_compare('1.8.0', 'ge')
+    - ansible_version | version_compare('1.9.0', 'ne')
+
 - name: Gather OpenShift facts
   openshift_facts:
-- 
cgit v1.2.3


From 4da7b790d5eb8e870bd81208f7074bba81a0989b Mon Sep 17 00:00:00 2001
From: Wesley Hearn <whearn@redhat.com>
Date: Wed, 20 May 2015 16:45:31 -0400
Subject: Made a generic set_attr and set_attrs function to reduce dup code

---
 filter_plugins/oo_filters.py        | 32 --------------
 filter_plugins/oo_resourceconfig.py | 87 +++++++++++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+), 32 deletions(-)
 create mode 100644 filter_plugins/oo_resourceconfig.py

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8feb53f43..e02114d9e 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -176,38 +176,6 @@ def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
         return [root_vol, docker_vol]
     return [root_vol]
 
-def oo_set_node_label(arg, key, value, attr_key=None, attr_value=None):
-    ''' This cycles through openshift node definitions
-        (from "osc get nodes -o json"), and adds a label.
-
-        If attr_key and attr_value are set, this will only set the label on
-        nodes where the attribute matches the specified value.
-
-        Ex:
-        - shell: osc get nodes -o json
-          register: output
-
-        - set_fact:
-          node_facts: "{{ output.stdout
-                             | from_json
-                             | oo_set_node_label('region', 'infra',
-                                            'metadata.name', '172.16.17.43') }}"
-    '''
-
-    for item in arg['items']:
-        if attr_key and attr_value:
-            actual_attr_value = get_attr(item, attr_key)
-
-            if str(attr_value) != str(actual_attr_value):
-                continue # We only want to set the values on hosts with defined attributes
-
-        if 'labels' not in item['metadata']:
-            item['metadata']['labels'] = {}
-
-        item['metadata']['labels'][key] = value
-
-    return arg
-
 # disabling pylint checks for too-few-public-methods and no-self-use since we
 # need to expose a FilterModule object that has a filters method that returns
 # a mapping of filter names to methods.
diff --git a/filter_plugins/oo_resourceconfig.py b/filter_plugins/oo_resourceconfig.py
new file mode 100644
index 000000000..6d1c31443
--- /dev/null
+++ b/filter_plugins/oo_resourceconfig.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Filters for configuring resources in openshift-ansible
+'''
+
+from ansible import errors
+
+def get_attr(data, attribute=None):
+    ''' This looks up dictionary attributes of the form a.b.c and returns
+        the value.
+        Ex: data = {'a': {'b': {'c': 5}}}
+            attribute = "a.b.c"
+            returns 5
+    '''
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+    ptr = data
+    for attr in attribute.split('.'):
+        ptr = ptr[attr]
+
+    return ptr
+
+def set_attr(item, key, value, attr_key=None, attr_value=None):
+    if attr_key and attr_value:
+        actual_attr_value = get_attr(item, attr_key)
+
+        if str(attr_value) != str(actual_attr_value):
+            continue # We only want to set the values on hosts with defined attributes
+
+        kvp = item
+        for attr in key.split('.'):
+            if attr == key.split('.')[len(key.split('.'))-1]:
+                kvp[attr] = value
+                continue
+            if attr not in kvp:
+                kvp[attr] = {}
+
+            kvp = kvp[attr]
+    return item
+
+
+def set_attrs(items, key, value, attr_key=None, attr_value=None):
+    for item in items:
+        create_update_key(item, key, value, attr_key, attr_value)
+
+    return items
+
+
+def oo_set_node_label(arg, key, value, attr_key=None, attr_value=None):
+    ''' This cycles through openshift node definitions
+        (from "osc get nodes -o json"), and adds a label.
+
+        If attr_key and attr_value are set, this will only set the label on
+        nodes where the attribute matches the specified value.
+
+        Ex:
+        - shell: osc get nodes -o json
+          register: output
+
+        - set_fact:
+          node_facts: "{{ output.stdout
+                             | from_json
+                             | oo_set_node_label('region', 'infra',
+                                            'metadata.name', '172.16.17.43') }}"
+    '''
+    arg['items'] = set_attrs(arg['items'], key, value, attr_key, attr_value)
+
+    return arg
+
+
+def oo_set_resource_node(arg, value):
+    arg = set_attr(arg, 'template.podTemplate.nodeSelector.region', value)
+
+    return arg
+
+
+class FilterModule(object):
+    ''' FilterModule '''
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "oo_set_node_label": oo_set_node_label,
+            "oo_set_resource_node": oo_set_resource_node
+        }
-- 
cgit v1.2.3


From 970ac423967bbe3806e74de2dfefd7da5bb851d2 Mon Sep 17 00:00:00 2001
From: Wesley Hearn <whearn@redhat.com>
Date: Wed, 20 May 2015 16:47:07 -0400
Subject: Revert "Made a generic set_attr and set_attrs function to reduce dup
 code"

This reverts commit 4da7b790d5eb8e870bd81208f7074bba81a0989b.
---
 filter_plugins/oo_filters.py        | 32 ++++++++++++++
 filter_plugins/oo_resourceconfig.py | 87 -------------------------------------
 2 files changed, 32 insertions(+), 87 deletions(-)
 delete mode 100644 filter_plugins/oo_resourceconfig.py

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index e02114d9e..8feb53f43 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -176,6 +176,38 @@ def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
         return [root_vol, docker_vol]
     return [root_vol]
 
+def oo_set_node_label(arg, key, value, attr_key=None, attr_value=None):
+    ''' This cycles through openshift node definitions
+        (from "osc get nodes -o json"), and adds a label.
+
+        If attr_key and attr_value are set, this will only set the label on
+        nodes where the attribute matches the specified value.
+
+        Ex:
+        - shell: osc get nodes -o json
+          register: output
+
+        - set_fact:
+          node_facts: "{{ output.stdout
+                             | from_json
+                             | oo_set_node_label('region', 'infra',
+                                            'metadata.name', '172.16.17.43') }}"
+    '''
+
+    for item in arg['items']:
+        if attr_key and attr_value:
+            actual_attr_value = get_attr(item, attr_key)
+
+            if str(attr_value) != str(actual_attr_value):
+                continue # We only want to set the values on hosts with defined attributes
+
+        if 'labels' not in item['metadata']:
+            item['metadata']['labels'] = {}
+
+        item['metadata']['labels'][key] = value
+
+    return arg
+
 # disabling pylint checks for too-few-public-methods and no-self-use since we
 # need to expose a FilterModule object that has a filters method that returns
 # a mapping of filter names to methods.
diff --git a/filter_plugins/oo_resourceconfig.py b/filter_plugins/oo_resourceconfig.py
deleted file mode 100644
index 6d1c31443..000000000
--- a/filter_plugins/oo_resourceconfig.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
-Filters for configuring resources in openshift-ansible
-'''
-
-from ansible import errors
-
-def get_attr(data, attribute=None):
-    ''' This looks up dictionary attributes of the form a.b.c and returns
-        the value.
-        Ex: data = {'a': {'b': {'c': 5}}}
-            attribute = "a.b.c"
-            returns 5
-    '''
-    if not attribute:
-        raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
-    ptr = data
-    for attr in attribute.split('.'):
-        ptr = ptr[attr]
-
-    return ptr
-
-def set_attr(item, key, value, attr_key=None, attr_value=None):
-    if attr_key and attr_value:
-        actual_attr_value = get_attr(item, attr_key)
-
-        if str(attr_value) != str(actual_attr_value):
-            continue # We only want to set the values on hosts with defined attributes
-
-        kvp = item
-        for attr in key.split('.'):
-            if attr == key.split('.')[len(key.split('.'))-1]:
-                kvp[attr] = value
-                continue
-            if attr not in kvp:
-                kvp[attr] = {}
-
-            kvp = kvp[attr]
-    return item
-
-
-def set_attrs(items, key, value, attr_key=None, attr_value=None):
-    for item in items:
-        create_update_key(item, key, value, attr_key, attr_value)
-
-    return items
-
-
-def oo_set_node_label(arg, key, value, attr_key=None, attr_value=None):
-    ''' This cycles through openshift node definitions
-        (from "osc get nodes -o json"), and adds a label.
-
-        If attr_key and attr_value are set, this will only set the label on
-        nodes where the attribute matches the specified value.
-
-        Ex:
-        - shell: osc get nodes -o json
-          register: output
-
-        - set_fact:
-          node_facts: "{{ output.stdout
-                             | from_json
-                             | oo_set_node_label('region', 'infra',
-                                            'metadata.name', '172.16.17.43') }}"
-    '''
-    arg['items'] = set_attrs(arg['items'], key, value, attr_key, attr_value)
-
-    return arg
-
-
-def oo_set_resource_node(arg, value):
-    arg = set_attr(arg, 'template.podTemplate.nodeSelector.region', value)
-
-    return arg
-
-
-class FilterModule(object):
-    ''' FilterModule '''
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {
-            "oo_set_node_label": oo_set_node_label,
-            "oo_set_resource_node": oo_set_resource_node
-        }
-- 
cgit v1.2.3


From a58e431f42763738061db0477d1e863169199c7a Mon Sep 17 00:00:00 2001
From: Wesley Hearn <whearn@redhat.com>
Date: Wed, 20 May 2015 17:02:21 -0400
Subject: Revert "Initial add of openshift_master_post"

This reverts commit 294f2254b818c466ecf7ab71426a08b6dc99f89c.
---
 roles/openshift_master_post/README.md         |  39 --------
 roles/openshift_master_post/defaults/main.yml |   2 -
 roles/openshift_master_post/handlers/main.yml |   2 -
 roles/openshift_master_post/meta/main.yml     | 130 --------------------------
 roles/openshift_master_post/tasks/main.yml    |  27 ------
 roles/openshift_master_post/vars/main.yml     |   5 -
 6 files changed, 205 deletions(-)
 delete mode 100644 roles/openshift_master_post/README.md
 delete mode 100644 roles/openshift_master_post/defaults/main.yml
 delete mode 100644 roles/openshift_master_post/handlers/main.yml
 delete mode 100644 roles/openshift_master_post/meta/main.yml
 delete mode 100644 roles/openshift_master_post/tasks/main.yml
 delete mode 100644 roles/openshift_master_post/vars/main.yml

diff --git a/roles/openshift_master_post/README.md b/roles/openshift_master_post/README.md
deleted file mode 100644
index 0aa08df5e..000000000
--- a/roles/openshift_master_post/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-OpenShift Master Post
-=========
-
-Post installation steps for setting up the cluster
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-| Name                        | Default value     | Description                                                                |
-|-----------------------------|-------------------|----------------------------------------------------------------------------|
-| omp_infra_node_filter_key   | "status.capacity" | Key from `osc get nodes -o json` to designate which node is the infra node |
-| omp_infra_node_filter_value | "7232144Ki"       | Value of omp_infra_node_filter_key to filter on |
-| omp_infra_node_label        | "infra"           | The label to apply to the infra node |
-| omp_node_region             | "us-east"         | Region that the none infra nodes are in |
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Wesley Hearn (whearn@redhat.com)
diff --git a/roles/openshift_master_post/defaults/main.yml b/roles/openshift_master_post/defaults/main.yml
deleted file mode 100644
index 4f2e60144..000000000
--- a/roles/openshift_master_post/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for openshift_master_post
diff --git a/roles/openshift_master_post/handlers/main.yml b/roles/openshift_master_post/handlers/main.yml
deleted file mode 100644
index e223d1ad0..000000000
--- a/roles/openshift_master_post/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for openshift_master_post
diff --git a/roles/openshift_master_post/meta/main.yml b/roles/openshift_master_post/meta/main.yml
deleted file mode 100644
index 01cf61397..000000000
--- a/roles/openshift_master_post/meta/main.yml
+++ /dev/null
@@ -1,130 +0,0 @@
----
-galaxy_info:
-  author: your name
-  description: 
-  company: your company (optional)
-  # If the issue tracker for your role is not on github, uncomment the
-  # next line and provide a value
-  # issue_tracker_url: http://example.com/issue/tracker
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your 
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: SmartOS
-  #  versions:
-  #  - all
-  #  - any
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-  
diff --git a/roles/openshift_master_post/tasks/main.yml b/roles/openshift_master_post/tasks/main.yml
deleted file mode 100644
index 391c4a293..000000000
--- a/roles/openshift_master_post/tasks/main.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Get Nodes Config
-  shell: osc get nodes -o json | sed -e '/"resourceVersion"/d'
-  register: output
-
-- name: Set node regions
-    node_facts: "{{ output.stdout | from_json
-                  | oo_set_node_label('region', {{omp_node_region}}) }}"
-
-- name: Set infra node labels
-  set_fact:
-    node_final_facts: "{{ output.stdout | from_json
-                  | oo_set_node_label('region', {{omp_infra_node_label}},
-                  {{omp_infra_node_filter_key}}, {{omp_infra_node_filter_value}}) }}"
-
-- name: Write node config to temp file
-  copy:
-    content: "{{ node_final_facts }}"
-    dest: /tmp/nodes.json
-
-- name: Import new node config
-  shell: osc update node -f /tmp/nodes.json
-
-- name: Remove node temp file
-  file:
-    path: /tmp/nodes.json
-	state: absent
diff --git a/roles/openshift_master_post/vars/main.yml b/roles/openshift_master_post/vars/main.yml
deleted file mode 100644
index 1ebc12aa1..000000000
--- a/roles/openshift_master_post/vars/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-omp_infra_node_filter_key: "status.capacity"
-omp_infra_node_filter_value: "7232144Ki"
-omp_infra_node_label: "infra"
-omp_node_region: "us-east"
-- 
cgit v1.2.3


From dc3437ad8e4f9323ebe916a44cca2e4b41e9aedf Mon Sep 17 00:00:00 2001
From: Wesley Hearn <whearn@redhat.com>
Date: Wed, 20 May 2015 17:02:33 -0400
Subject: Revert "oo_filters.py: oo_set_node_label"

This reverts commit d04ddc76227db51fda8b1850a09a9c3cfd9125db.
---
 filter_plugins/oo_filters.py | 35 +----------------------------------
 1 file changed, 1 insertion(+), 34 deletions(-)

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8feb53f43..097038450 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -176,38 +176,6 @@ def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
         return [root_vol, docker_vol]
     return [root_vol]
 
-def oo_set_node_label(arg, key, value, attr_key=None, attr_value=None):
-    ''' This cycles through openshift node definitions
-        (from "osc get nodes -o json"), and adds a label.
-
-        If attr_key and attr_value are set, this will only set the label on
-        nodes where the attribute matches the specified value.
-
-        Ex:
-        - shell: osc get nodes -o json
-          register: output
-
-        - set_fact:
-          node_facts: "{{ output.stdout
-                             | from_json
-                             | oo_set_node_label('region', 'infra',
-                                            'metadata.name', '172.16.17.43') }}"
-    '''
-
-    for item in arg['items']:
-        if attr_key and attr_value:
-            actual_attr_value = get_attr(item, attr_key)
-
-            if str(attr_value) != str(actual_attr_value):
-                continue # We only want to set the values on hosts with defined attributes
-
-        if 'labels' not in item['metadata']:
-            item['metadata']['labels'] = {}
-
-        item['metadata']['labels'][key] = value
-
-    return arg
-
 # disabling pylint checks for too-few-public-methods and no-self-use since we
 # need to expose a FilterModule object that has a filters method that returns
 # a mapping of filter names to methods.
@@ -224,6 +192,5 @@ class FilterModule(object):
             "oo_pdb": oo_pdb,
             "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
             "oo_ami_selector": oo_ami_selector,
-            "oo_ec2_volume_definition": oo_ec2_volume_definition,
-            "oo_set_node_label": oo_set_node_label
+            "oo_ec2_volume_definition": oo_ec2_volume_definition
         }
-- 
cgit v1.2.3


From 50900f4301b776d45b464b48744b8b2927ac9432 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Wed, 20 May 2015 17:22:41 -0400
Subject: openshift_master open port for skydns service

---
 roles/openshift_master/defaults/main.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 87fb347a8..56cf43531 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -2,12 +2,15 @@
 openshift_node_ips: []
 
 # TODO: update setting these values based on the facts
-# TODO: update for console port change
 os_firewall_allow:
 - service: etcd embedded
   port: 4001/tcp
 - service: OpenShift api https
   port: 8443/tcp
+- service: OpenShift dns tcp
+  port: 53/tcp
+- service: OpenShift dns udp
+  port: 53/udp
 os_firewall_deny:
 - service: OpenShift api http
   port: 8080/tcp
-- 
cgit v1.2.3


From e9b33b6ebacb5c72ba69974dc49649edcbdabfec Mon Sep 17 00:00:00 2001
From: Troy Dawson <tdawson@redhat.com>
Date: Thu, 21 May 2015 11:30:32 -0500
Subject: Update online ami image

---
 playbooks/aws/ansible-tower/launch.yml                | 2 +-
 playbooks/aws/openshift-cluster/vars.online.int.yml   | 2 +-
 playbooks/aws/openshift-cluster/vars.online.prod.yml  | 2 +-
 playbooks/aws/openshift-cluster/vars.online.stage.yml | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 56235bc8a..c23bda3a0 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -6,7 +6,7 @@
 
   vars:
     inst_region: us-east-1
-    rhel7_ami: ami-906240f8
+    rhel7_ami: ami-78756d10
     user_data_file: user_data.txt
 
   vars_files:
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
index 12f79a9c1..e115615d5 100644
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -1,5 +1,5 @@
 ---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
index 12f79a9c1..e115615d5 100644
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -1,5 +1,5 @@
 ---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
index 12f79a9c1..e115615d5 100644
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -1,5 +1,5 @@
 ---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-- 
cgit v1.2.3