From 81628f94bad4b303212bf77752f62c03728e0168 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 17 Mar 2015 11:09:12 -0400 Subject: Fix hostname handling - always set hostname if hostname does not match openshift_hostname - Use local IP instead of public IP as hostname for workaround --- roles/openshift_common/README.md | 1 - roles/openshift_common/defaults/main.yml | 3 +-- roles/openshift_common/tasks/main.yml | 6 ++---- 3 files changed, 3 insertions(+), 7 deletions(-) (limited to 'roles') diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index c2ae609ff..79076ed19 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -14,7 +14,6 @@ Role Variables | Name | Default value | | |-------------------------------|------------------------------|----------------------------------------| -| openshift_bind_ip | ansible_default_ipv4.address | IP to use for local binding | | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | Workaround needed to set hostname to IP address | | openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index a541591fb..eb6edbc03 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -1,8 +1,7 @@ --- -openshift_bind_ip: "{{ ansible_default_ipv4.address }}" openshift_debug_level: 0 # TODO: Once openshift stops resolving hostnames for node queries remove # this... openshift_hostname_workaround: true -openshift_hostname: "{{ openshift_public_ip if openshift_hostname_workaround else ansible_fqdn }}" +openshift_hostname: "{{ ansible_default_ipv4.address if openshift_hostname_workaround else ansible_fqdn }}" diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 728bba4e4..07737a71f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,8 +1,6 @@ --- -# fixme: Once openshift stops resolving hostnames for node queries remove this... 
-- name: Set hostname to IP Addr (WORKAROUND) - hostname: name={{ openshift_bind_ip }} - when: openshift_hostname_workaround +- name: Set hostname + hostname: name={{ openshift_hostname }} - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From 7035459d20dd2d278b0a0e6ff96421639f6e0e34 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 00:05:51 -0400 Subject: Register node fixes - Set --hostname flag in node config in openshift_node role - Support some additional node attributes in openshift_node role - podCIDR - labels - annotations - Support both output types for openshift ex config view in openshift_register_node module - Support multiple api versions in openshift_register_node module - Support additional attributes in openshift_register_node module - annotations - labels - pod_cidr - external_ips (v1beta3, will be available after next kube rebase) - internal_ips (v1beta3, will be available after next kube rebase) - hostnames (v1beta3, will be available after next kube rebase) - external_id (v1beta3, will be available after next kube rebase) --- roles/openshift_node/defaults/main.yml | 8 +- .../library/openshift_register_node.py | 453 ++++++++++++++------- roles/openshift_node/tasks/main.yml | 14 +- 3 files changed, 333 insertions(+), 142 deletions(-) (limited to 'roles') diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index c45524f16..e4d5ebfee 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -5,6 +5,8 @@ os_firewall_allow: - service: OpenShift kubelet port: 10250/tcp openshift_node_resources: - capacity: - cpu: - memory: + cpu: + memory: + cidr: +openshift_node_labels: {} +openshift_node_annotations: {} diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py index 63079e59b..4922585d7 100644 --- a/roles/openshift_node/library/openshift_register_node.py +++ b/roles/openshift_node/library/openshift_register_node.py @@ -6,78 +6,315 @@ import os import multiprocessing import socket from subprocess import check_output, Popen +from decimal import * DOCUMENTATION = ''' --- -module: openshift_register_node -short_description: This module registers an openshift-node with an openshift-master -author: Jason DeTiberus -requirements: [ openshift-node ] -notes: Node resources can be specified using either the resources option or the following options: cpu, memory +module: kubernetes_register_node +short_description: Registers a kubernetes node with a master +description: + - Registers a kubernetes node with a master options: name: + default: null description: - - id for this node (usually the node fqdn) + - Identifier for this node (usually the node fqdn). required: true - hostIP: + api_verison: + choices: ['v1beta1', 'v1beta3'] + default: 'v1beta1' description: - - ip address for this node + - Kubernetes API version to use + required: true + host_ip: + default: null + description: + - IP Address to associate with the node when registering. + Available in the following API versions: v1beta1. required: false - cpu: + hostnames: + default: [] description: - - number of CPUs for this node + - Valid hostnames for this node. Available in the following API + versions: v1beta3. 
required: false - default: number of logical CPUs detected - memory: + external_ips: + default: [] description: - - Memory available for this node in bytes + - External IP Addresses for this node. Available in the following API + versions: v1beta3. required: false - default: 80% MemTotal - resources: + internal_ips: + default: [] description: - - A json string representing Node resources + - Internal IP Addresses for this node. Available in the following API + versions: v1beta3. + required: false + cpu: + default: null + description: + - Number of CPUs to allocate for this node. If not provided, then + the node will be registered to advertise the number of logical + CPUs available. When using the v1beta1 API, you must specify the + CPU count as a floating point number with no more than 3 decimal + places. API version v1beta3 and newer accepts arbitrary float + values. + required: false + memory: + default: null + description: + - Memory available for this node. If not provided, then the node + will be registered to advertise 80% of MemTotal as available + memory. When using the v1beta1 API, you must specify the memory + size in bytes. API version v1beta3 and newer accepts binary SI + and decimal SI values. required: false ''' EXAMPLES = ''' # Minimal node registration - openshift_register_node: name=ose3.node.example.com -# Node registration with all options (using cpu and memory options) +# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of +# Memory - openshift_register_node: name: ose3.node.example.com + api_version: v1beta1 hostIP: 192.168.1.1 - apiVersion: v1beta1 cpu: 1 - memory: 1073741824 + memory: 500000000 -# Node registration with all options (using resources option) +# Node registration using the v1beta3 API, setting an alternate hostname, +# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory - openshift_register_node: name: ose3.node.example.com - hostIP: 192.168.1.1 - apiVersion: v1beta1 - resources: - capacity: - cpu: 1 - memory: 1073741824 + api_version: v1beta3 + external_ips: ['192.168.1.5'] + internal_ips: ['10.0.0.5'] + hostnames: ['ose2.node.internal.local'] + cpu: 3.5 + memory: 1Ti ''' + +class ClientConfigException(Exception): + pass + +class ClientConfig: + def __init__(self, client_opts, module): + _, output, error = module.run_command(["/usr/bin/openshift", "ex", + "config", "view", "-o", + "json"] + client_opts, + check_rc = True) + self.config = json.loads(output) + + if not (bool(self.config['clusters']) or + bool(self.config['contexts']) or + bool(self.config['current-context']) or + bool(self.config['users'])): + raise ClientConfigException(msg="Client config missing required " \ + "values", + output=output) + + def current_context(self): + return self.config['current-context'] + + def section_has_value(self, section_name, value): + section = self.config[section_name] + if isinstance(section, dict): + return value in section + else: + val = next((item for item in section + if item['name'] == value), None) + return val is not None + + def has_context(self, context): + return self.section_has_value('contexts', context) + + def has_user(self, user): + return self.section_has_value('users', user) + + def has_cluster(self, cluster): + return self.section_has_value('clusters', cluster) + + def get_value_for_context(self, context, attribute): + contexts = self.config['contexts'] + if isinstance(contexts, dict): + return contexts[context][attribute] + else: + return next((c['context'][attribute] for c in contexts + if 
c['name'] == context), None) + + def get_user_for_context(self, context): + return self.get_value_for_context(context, 'user') + + def get_cluster_for_context(self, context): + return self.get_value_for_context(context, 'cluster') + +class Util: + @staticmethod + def getLogicalCores(): + return multiprocessing.cpu_count() + + @staticmethod + def getMemoryPct(pct): + with open('/proc/meminfo', 'r') as mem: + for line in mem: + entries = line.split() + if str(entries.pop(0)) == 'MemTotal:': + mem_total_kb = Decimal(entries.pop(0)) + mem_capacity_kb = mem_total_kb * Decimal(pct) + return str(mem_capacity_kb.to_integral_value() * 1024) + + return "" + + @staticmethod + def remove_empty_elements(mapping): + if isinstance(mapping, dict): + m = mapping.copy() + for key, val in mapping.iteritems(): + if not val: + del m[key] + return m + else: + return mapping + +class NodeResources: + def __init__(self, version, cpu=None, memory=None): + if version == 'v1beta1': + self.resources = dict(capacity=dict()) + self.resources['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores() + self.resources['capacity']['memory'] = memory if cpu else Util.getMemoryPct(.75) + + def get_resources(self): + return Util.remove_empty_elements(self.resources) + +class NodeSpec: + def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None): + if version == 'v1beta3': + self.spec = dict(podCIDR=cidr, externalID=externalID, + capacity=dict()) + self.spec['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores() + self.spec['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75) + + def get_spec(self): + return Util.remove_empty_elements(self.spec) + +class NodeStatus: + def addAddresses(self, addressType, addresses): + addressList = [] + for address in addresses: + addressList.append(dict(type=addressType, address=address)) + return addressList + + def __init__(self, version, externalIPs = [], internalIPs = [], + hostnames = []): + if version == 'v1beta3': + self.status = dict(addresses = addAddresses('ExternalIP', + externalIPs) + + addAddresses('InternalIP', + internalIPs) + + addAddresses('Hostname', + hostnames)) + + def get_status(self): + return Util.remove_empty_elements(self.status) + +class Node: + def __init__(self, module, client_opts, version='v1beta1', name=None, + hostIP = None, hostnames=[], externalIPs=[], internalIPs=[], + cpu=None, memory=None, labels=dict(), annotations=dict(), + podCIDR=None, externalID=None): + self.module = module + self.client_opts = client_opts + if version == 'v1beta1': + self.node = dict(id = name, + kind = 'Node', + apiVersion = version, + hostIP = hostIP, + resources = NodeResources(version, cpu, memory), + cidr = podCIDR, + labels = labels, + annotations = annotations + ) + elif version == 'v1beta3': + metadata = dict(name = name, + labels = labels, + annotations = annotations + ) + self.node = dict(kind = 'Node', + apiVersion = version, + metadata = metadata, + spec = NodeSpec(version, cpu, memory, podCIDR, + externalID), + status = NodeStatus(version, externalIPs, + internalIPs, hostnames), + ) + + def get_name(self): + if self.node['apiVersion'] == 'v1beta1': + return self.node['id'] + elif self.node['apiVersion'] == 'v1beta3': + return self.node['name'] + + def get_node(self): + node = self.node.copy() + if self.node['apiVersion'] == 'v1beta1': + node['resources'] = self.node['resources'].get_resources() + elif self.node['apiVersion'] == 'v1beta3': + node['spec'] = self.node['spec'].get_spec() + node['status'] = 
self.node['status'].get_status() + return Util.remove_empty_elements(node) + + def exists(self): + _, output, error = self.module.run_command(["/usr/bin/osc", "get", + "nodes"] + self.client_opts, + check_rc = True) + if re.search(self.module.params['name'], output, re.MULTILINE): + return True + return False + + def create(self): + cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] + rc, output, error = self.module.run_command(cmd, + data=self.module.jsonify(self.get_node())) + if rc != 0: + if re.search("minion \"%s\" already exists" % self.get_name(), + error): + self.module.exit_json(changed=False, + msg="node definition already exists", + node=self.get_node()) + else: + self.module.fail_json(msg="Node creation failed.", rc=rc, + output=output, error=error, + node=self.get_node()) + else: + return True + def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required = True), - hostIP = dict(), - apiVersion = dict(), - cpu = dict(), - memory = dict(), - resources = dict(), - client_config = dict(), - client_cluster = dict(default = 'master'), - client_context = dict(default = 'master'), - client_user = dict(default = 'admin') + name = dict(required = True, type = 'str'), + host_ip = dict(type = 'str'), + hostnames = dict(type = 'list', default = []), + external_ips = dict(type = 'list', default = []), + internal_ips = dict(type = 'list', default = []), + api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 + choices = ['v1beta1', 'v1beta3']), + cpu = dict(type = 'str'), + memory = dict(type = 'str'), + labels = dict(type = 'dict', default = {}), # TODO: needs documented + annotations = dict(type = 'dict', default = {}), # TODO: needs documented + pod_cidr = dict(type = 'str'), # TODO: needs documented + external_id = dict(type = 'str'), # TODO: needs documented + client_config = dict(type = 'str'), # TODO: needs documented + client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented + client_context = dict(type = 'str', default = 'master'), # TODO: needs documented + client_user = dict(type = 'str', default = 'admin') # TODO: needs documented ), mutually_exclusive = [ - ['resources', 'cpu'], - ['resources', 'memory'] + ['host_ip', 'external_ips'], + ['host_ip', 'internal_ips'], + ['host_ip', 'hostnames'], ], supports_check_mode=True ) @@ -93,119 +330,61 @@ def main(): client_opts.append("--kubeconfig=%s" % module.params['client_config']) try: - output = check_output(["/usr/bin/openshift", "ex", "config", "view", - "-o", "json"] + client_opts, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - module.fail_json(msg="Failed to get client configuration", - command=e.cmd, returncode=e.returncode, output=e.output) - - config = json.loads(output) - if not (bool(config['clusters']) or bool(config['contexts']) or - bool(config['current-context']) or bool(config['users'])): - module.fail_json(msg="Client config missing required values", - output=output) + config = ClientConfig(client_opts, module) + except ClientConfigException as e: + module.fail_json(msg="Failed to get client configuration", exception=e) client_context = module.params['client_context'] - if client_context: - config_context = next((context for context in config['contexts'] - if context['name'] == client_context), None) - if not config_context: - module.fail_json(msg="Context %s not found in client config" % - client_context) - if not config['current-context'] or config['current-context'] 
!= client_context: + if config.has_context(client_context): + if client_context != config.current_context(): client_opts.append("--context=%s" % client_context) + else: + module.fail_json(msg="Context %s not found in client config" % + client_context) client_user = module.params['client_user'] - if client_user: - config_user = next((user for user in config['users'] - if user['name'] == client_user), None) - if not config_user: - module.fail_json(msg="User %s not found in client config" % - client_user) - if client_user != config_context['context']['user']: + if config.has_user(client_user): + if client_user != config.get_user_for_context(client_context): client_opts.append("--user=%s" % client_user) + else: + module.fail_json(msg="User %s not found in client config" % + client_user) client_cluster = module.params['client_cluster'] - if client_cluster: - config_cluster = next((cluster for cluster in config['clusters'] - if cluster['name'] == client_cluster), None) - if not client_cluster: - module.fail_json(msg="Cluster %s not found in client config" % - client_cluster) - if client_cluster != config_context['context']['cluster']: + if config.has_cluster(client_cluster): + if client_cluster != config.get_cluster_for_context(client_cluster): client_opts.append("--cluster=%s" % client_cluster) + else: + module.fail_json(msg="Cluster %s not found in client config" % + client_cluster) - node_def = dict( - id = module.params['name'], - kind = 'Node', - apiVersion = 'v1beta1', - resources = dict( - capacity = dict() - ) - ) - - for key, value in module.params.iteritems(): - if key in ['cpu', 'memory']: - node_def['resources']['capacity'][key] = value - elif key == 'name': - node_def['id'] = value - elif key != 'client_config': - if value: - node_def[key] = value + # TODO: provide sane defaults for some (like hostname, externalIP, + # internalIP, etc) + node = Node(module, client_opts, module.params['api_version'], + module.params['name'], module.params['host_ip'], + module.params['hostnames'], module.params['external_ips'], + module.params['internal_ips'], module.params['cpu'], + module.params['memory'], module.params['labels'], + module.params['annotations'], module.params['pod_cidr'], + module.params['external_id']) - if not node_def['resources']['capacity']['cpu']: - node_def['resources']['capacity']['cpu'] = multiprocessing.cpu_count() - - if not node_def['resources']['capacity']['memory']: - with open('/proc/meminfo', 'r') as mem: - for line in mem: - entries = line.split() - if str(entries.pop(0)) == 'MemTotal:': - mem_total_kb = int(entries.pop(0)) - mem_capacity = int(mem_total_kb * 1024 * .75) - node_def['resources']['capacity']['memory'] = mem_capacity - break - - try: - output = check_output(["/usr/bin/osc", "get", "nodes"] + client_opts, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - module.fail_json(msg="Failed to get node list", command=e.cmd, - returncode=e.returncode, output=e.output) - - if re.search(module.params['name'], output, re.MULTILINE): - module.exit_json(changed=False, node_def=node_def) + # TODO: attempt to support changing node settings where possible and/or + # modifying node resources + if node.exists(): + module.exit_json(changed=False, node=node.get_node()) elif module.check_mode: - module.exit_json(changed=True, node_def=node_def) - - config_def = dict( - metadata = dict( - name = "add-node-%s" % module.params['name'] - ), - kind = 'Config', - apiVersion = 'v1beta1', - items = [node_def] - ) - - p = Popen(["/usr/bin/osc"] + client_opts 
+ ["create", "node"] + ["-f", "-"], - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, close_fds=True) - (out, err) = p.communicate(module.jsonify(config_def)) - ret = p.returncode - - if ret != 0: - if re.search("minion \"%s\" already exists" % module.params['name'], - err): - module.exit_json(changed=False, - msg="node definition already exists", config_def=config_def) + module.exit_json(changed=True, node=node.get_node()) + else: + if node.create(): + module.exit_json(changed=True, + msg="Node created successfully", + node=node.get_node()) else: - module.fail_json(msg="Node creation failed.", ret=ret, out=out, - err=err, config_def=config_def) + module.fail_json(msg="Unknown error creating node", + node=node.get_node()) - module.exit_json(changed=True, out=out, err=err, ret=ret, - node_def=config_def) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 6721c7401..e380ba1fb 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -21,7 +21,7 @@ lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --loglevel={{ openshift_node_debug_level }}\"" + line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }}\"" notify: - restart openshift-node @@ -75,4 +75,14 @@ - name: Register node (if not already registered) openshift_register_node: name: "{{ openshift_hostname }}" - resources: "{{ openshift_node_resources }}" + api_version: v1beta1 + cpu: "{{ openshift_node_resources.cpu }}" + memory: "{{ openshift_node_resources.memory }}" + pod_cidr: "{{ openshift_node_resources.cidr }}" + host_ip: "{{ ansible_default_ipv4.address }}" + labels: "{{ openshift_node_labels }}" + annotations: "{{ openshift_node_annotations }}" + # TODO: support customizing other attributes such as: client_config, + # client_cluster, client_context, client_user + # TODO: updated for v1beta3 changes after rebase: hostnames, external_ips, + # internal_ips, external_id -- cgit v1.2.3 From 8613b70503d2d1cbe57ddebc11919edeb26eaadc Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 17:15:19 -0400 Subject: Rename repos role to openshift_repos - Rename repos role to openshift_repos - Make openshift_repos a dependency of openshift_common - Add README and metadata for openshift_repos - Playbook updates for role rename - Verify libselinux-python is installed, otherwise some of the bulit-in modules we use fail --- roles/openshift_common/README.md | 1 + roles/openshift_common/meta/main.yml | 1 + roles/openshift_repos/README.md | 38 +++++++++++++ roles/openshift_repos/defaults/main.yaml | 5 ++ .../files/online/RPM-GPG-KEY-redhat-beta | 61 +++++++++++++++++++++ .../files/online/RPM-GPG-KEY-redhat-release | 63 ++++++++++++++++++++++ .../files/online/epel7-kubernetes.repo | 6 +++ .../files/online/epel7-openshift.repo | 6 +++ .../files/online/oso-rhui-rhel-7-extras.repo | 23 ++++++++ .../files/online/oso-rhui-rhel-7-server.repo | 21 ++++++++ .../files/online/rhel-7-libra-candidate.repo | 11 ++++ roles/openshift_repos/meta/main.yml | 14 +++++ roles/openshift_repos/tasks/main.yaml | 46 ++++++++++++++++ roles/openshift_repos/templates/yum_repo.j2 | 15 ++++++ roles/openshift_repos/vars/main.yml | 2 + roles/repos/defaults/main.yaml | 5 -- 
roles/repos/files/online/RPM-GPG-KEY-redhat-beta | 61 --------------------- .../repos/files/online/RPM-GPG-KEY-redhat-release | 63 ---------------------- roles/repos/files/online/epel7-kubernetes.repo | 6 --- roles/repos/files/online/epel7-openshift.repo | 6 --- .../repos/files/online/oso-rhui-rhel-7-extras.repo | 23 -------- .../repos/files/online/oso-rhui-rhel-7-server.repo | 21 -------- .../repos/files/online/rhel-7-libra-candidate.repo | 11 ---- roles/repos/tasks/main.yaml | 41 -------------- roles/repos/templates/yum_repo.j2 | 15 ------ roles/repos/vars/main.yml | 2 - 26 files changed, 313 insertions(+), 254 deletions(-) create mode 100644 roles/openshift_repos/README.md create mode 100644 roles/openshift_repos/defaults/main.yaml create mode 100644 roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta create mode 100644 roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release create mode 100644 roles/openshift_repos/files/online/epel7-kubernetes.repo create mode 100644 roles/openshift_repos/files/online/epel7-openshift.repo create mode 100644 roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo create mode 100644 roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo create mode 100644 roles/openshift_repos/files/online/rhel-7-libra-candidate.repo create mode 100644 roles/openshift_repos/meta/main.yml create mode 100644 roles/openshift_repos/tasks/main.yaml create mode 100644 roles/openshift_repos/templates/yum_repo.j2 create mode 100644 roles/openshift_repos/vars/main.yml delete mode 100644 roles/repos/defaults/main.yaml delete mode 100644 roles/repos/files/online/RPM-GPG-KEY-redhat-beta delete mode 100644 roles/repos/files/online/RPM-GPG-KEY-redhat-release delete mode 100644 roles/repos/files/online/epel7-kubernetes.repo delete mode 100644 roles/repos/files/online/epel7-openshift.repo delete mode 100644 roles/repos/files/online/oso-rhui-rhel-7-extras.repo delete mode 100644 roles/repos/files/online/oso-rhui-rhel-7-server.repo delete mode 100644 roles/repos/files/online/rhel-7-libra-candidate.repo delete mode 100644 roles/repos/tasks/main.yaml delete mode 100644 roles/repos/templates/yum_repo.j2 delete mode 100644 roles/repos/vars/main.yml (limited to 'roles') diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index c2ae609ff..a055cb032 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -25,6 +25,7 @@ Dependencies ------------ os_firewall +openshift_repos Example Playbook ---------------- diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml index 88b7677d0..cee4dd337 100644 --- a/roles/openshift_common/meta/main.yml +++ b/roles/openshift_common/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - { role: os_firewall } +- { role: openshift_repos } diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md new file mode 100644 index 000000000..6713e11fc --- /dev/null +++ b/roles/openshift_repos/README.md @@ -0,0 +1,38 @@ +OpenShift Repos +================ + +Configures repositories for an OpenShift installation + +Requirements +------------ + +A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, +rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos. 
+ +Role Variables +-------------- + +| Name | Default value | | +|-------------------------------|---------------|----------------------------------------------| +| openshift_deployment_type | online | Possible values enterprise, origin, online | +| openshift_additional_repos | {} | TODO | + +Dependencies +------------ + +None. + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +TODO diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml new file mode 100644 index 000000000..6fe2bf621 --- /dev/null +++ b/roles/openshift_repos/defaults/main.yaml @@ -0,0 +1,5 @@ +--- +# TODO: once we are able to configure/deploy origin using the openshift roles, +# then we should default to origin +openshift_deployment_type: online +openshift_additional_repos: {} diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta new file mode 100644 index 000000000..7b40671a4 --- /dev/null +++ b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta @@ -0,0 +1,61 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT +kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A +BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo +gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P +xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D +FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 +Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i +QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm +G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt +0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR +fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB +tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv +bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT +ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy +6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ +OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 +0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc +MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u +QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE +Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 +DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 +B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH +V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT +CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== +=21pb +-----END PGP PUBLIC KEY BLOCK----- +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG +package. Questions about this key should be sent to security@redhat.com. 
+ + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.0.6 (GNU/Linux) +Comment: For info see http://www.gnupg.org + +mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp +Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd +LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi +UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe +II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW +QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz ++AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1 +VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI +mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg +SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX +BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4 +F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF +AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q +0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc +RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI +JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR +xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU +ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5 +WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI +RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL +yACfb68fBd2pWEzLKsOk9imIobHHpzE= +=gpIn +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release new file mode 100644 index 000000000..0f83b622d --- /dev/null +++ b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release @@ -0,0 +1,63 @@ +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. This key is used for packages in Red Hat +products shipped after November 2009, and for all updates to those +products. + +Questions about this key should be sent to security@redhat.com. + +pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. 
(release key 2) + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF +0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF +0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c +u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh +XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H +5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW +9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj +/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 +PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY +HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF +buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB +tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 +LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK +CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC +2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf +C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 +un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E +0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE +IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh +8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL +Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki +JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 +OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq +dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== +=zbHE +-----END PGP PUBLIC KEY BLOCK----- +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. This key is a supporting (auxiliary) key for +Red Hat products shipped after November 2006 and for all updates to +those products. + +Questions about this key should be sent to security@redhat.com. 
+ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT +VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A +UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C +yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu +MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg +Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z +z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2 +eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq +SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg +SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC +AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg +ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX +=d/bm +-----END PGP PUBLIC KEY BLOCK----- + diff --git a/roles/openshift_repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo new file mode 100644 index 000000000..1deae2939 --- /dev/null +++ b/roles/openshift_repos/files/online/epel7-kubernetes.repo @@ -0,0 +1,6 @@ +[maxamillion-epel7-kubernetes] +name=Copr repo for epel7-kubernetes owned by maxamillion +baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/ +skip_if_unavailable=True +gpgcheck=0 +enabled=1 diff --git a/roles/openshift_repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo new file mode 100644 index 000000000..c7629872d --- /dev/null +++ b/roles/openshift_repos/files/online/epel7-openshift.repo @@ -0,0 +1,6 @@ +[maxamillion-origin-next] +name=Copr repo for origin-next owned by maxamillion +baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ +skip_if_unavailable=False +gpgcheck=0 +enabled=1 diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo new file mode 100644 index 000000000..cfe41f691 --- /dev/null +++ b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo @@ -0,0 +1,23 @@ +[oso-rhui-rhel-server-extras] +name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta +failovermethod=priority +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem + +[oso-rhui-rhel-server-extras-htb] +name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta +failovermethod=priority +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo new file mode 100644 index 000000000..ddc93193d --- /dev/null +++ b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo @@ -0,0 +1,21 @@ +[oso-rhui-rhel-server-releases] +name=OpenShift 
Online RHUI Mirror RH Enterprise Linux 7 +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem + +[oso-rhui-rhel-server-releases-optional] +name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo new file mode 100644 index 000000000..b4215679f --- /dev/null +++ b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo @@ -0,0 +1,11 @@ +[rhel-7-libra-candidate] +name=rhel-7-libra-candidate - \$basearch +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ + https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ +gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted +skip_if_unavailable=True +gpgcheck=0 +enabled=1 +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem +sslverify=False diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml new file mode 100644 index 000000000..cc18c453c --- /dev/null +++ b/roles/openshift_repos/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + author: TODO + description: OpenShift Repositories + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.7 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: [] diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml new file mode 100644 index 000000000..6219c4906 --- /dev/null +++ b/roles/openshift_repos/tasks/main.yaml @@ -0,0 +1,46 @@ +--- +# TODO: Add flag for enabling EPEL repo, default to false + +- assert: + that: openshift_deployment_type in known_openshift_deployment_types + +# TODO: remove this when origin support actually works +- fail: msg="OpenShift Origin support is not currently enabled" + when: openshift_deployment_type == 'origin' + +- name: Ensure libselinux-python is installed + yum: + pkg: libselinux-python + state: present + +- name: Create any additional repos that are defined + template: + src: yum_repo.j2 + dest: /etc/yum.repos.d/openshift_additional.repo + when: openshift_additional_repos | length > 0 + +- name: Remove the additional repos if no longer defined + file: + dest: /etc/yum.repos.d/openshift_additional.repo + state: absent + when: openshift_additional_repos | length == 0 + +- name: Remove any yum repo files for other deployment types + file: + path: "/etc/yum.repos.d/{{ item | basename }}" + state: absent + with_fileglob: + - '*/*' + when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$")) + +- name: Configure gpg keys if needed + copy: src={{ item }} dest=/etc/pki/rpm-gpg/ + with_fileglob: + - "{{ openshift_deployment_type }}/*" + when: item | basename | match("RPM-GPG-KEY-") + +- name: Configure yum repositories + copy: src={{ item }} dest=/etc/yum.repos.d/ + with_fileglob: + - "{{ openshift_deployment_type }}/*" + when: item | basename | search(".*\.repo$") diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2 new file mode 100644 index 000000000..7ea2c7460 --- /dev/null +++ b/roles/openshift_repos/templates/yum_repo.j2 @@ -0,0 +1,15 @@ +# {{ ansible_managed }} +{% for repo in openshift_additional_repos %} +[{{ repo.id }}] +name={{ repo.name | default(repo.id) }} +baseurl={{ repo.baseurl }} +{% set enable_repo = repo.enabled | default('1') %} +enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} +{% set enable_gpg_check = repo.gpgcheck | default('1') %} +gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} +{% for key, value in repo.iteritems() %} +{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %} +{{ key }}={{ value }} +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_repos/vars/main.yml new file mode 100644 index 000000000..bbb4c77e7 --- /dev/null +++ b/roles/openshift_repos/vars/main.yml @@ -0,0 +1,2 @@ +--- +known_openshift_deployment_types: ['origin', 'online', 'enterprise'] diff --git a/roles/repos/defaults/main.yaml b/roles/repos/defaults/main.yaml deleted file mode 100644 index 6fe2bf621..000000000 --- a/roles/repos/defaults/main.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# TODO: once we are able to configure/deploy origin using the openshift roles, -# then we should default to origin -openshift_deployment_type: online -openshift_additional_repos: {} diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/repos/files/online/RPM-GPG-KEY-redhat-beta deleted file mode 100644 index 7b40671a4..000000000 --- a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta +++ /dev/null @@ -1,61 
+0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT -kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A -BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo -gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P -xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D -FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 -Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i -QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm -G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt -0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR -fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB -tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv -bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT -ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy -6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ -OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 -0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc -MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u -QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE -Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 -DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 -B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH -V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT -CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== -=21pb ------END PGP PUBLIC KEY BLOCK----- -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG -package. Questions about this key should be sent to security@redhat.com. 
- - ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.0.6 (GNU/Linux) -Comment: For info see http://www.gnupg.org - -mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp -Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd -LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi -UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe -II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW -QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz -+AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1 -VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI -mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg -SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX -BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4 -F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF -AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q -0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc -RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI -JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR -xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU -ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5 -WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI -RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL -yACfb68fBd2pWEzLKsOk9imIobHHpzE= -=gpIn ------END PGP PUBLIC KEY BLOCK----- diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-release b/roles/repos/files/online/RPM-GPG-KEY-redhat-release deleted file mode 100644 index 0f83b622d..000000000 --- a/roles/repos/files/online/RPM-GPG-KEY-redhat-release +++ /dev/null @@ -1,63 +0,0 @@ -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. This key is used for packages in Red Hat -products shipped after November 2009, and for all updates to those -products. - -Questions about this key should be sent to security@redhat.com. - -pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. 
(release key 2) - ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF -0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF -0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c -u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh -XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H -5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW -9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj -/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 -PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY -HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF -buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB -tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 -LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK -CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC -2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf -C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 -un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E -0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE -IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh -8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL -Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki -JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 -OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq -dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== -=zbHE ------END PGP PUBLIC KEY BLOCK----- -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. This key is a supporting (auxiliary) key for -Red Hat products shipped after November 2006 and for all updates to -those products. - -Questions about this key should be sent to security@redhat.com. 
- ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT -VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A -UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C -yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu -MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg -Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z -z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2 -eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq -SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg -SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC -AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg -ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX -=d/bm ------END PGP PUBLIC KEY BLOCK----- - diff --git a/roles/repos/files/online/epel7-kubernetes.repo b/roles/repos/files/online/epel7-kubernetes.repo deleted file mode 100644 index 1deae2939..000000000 --- a/roles/repos/files/online/epel7-kubernetes.repo +++ /dev/null @@ -1,6 +0,0 @@ -[maxamillion-epel7-kubernetes] -name=Copr repo for epel7-kubernetes owned by maxamillion -baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/ -skip_if_unavailable=True -gpgcheck=0 -enabled=1 diff --git a/roles/repos/files/online/epel7-openshift.repo b/roles/repos/files/online/epel7-openshift.repo deleted file mode 100644 index c7629872d..000000000 --- a/roles/repos/files/online/epel7-openshift.repo +++ /dev/null @@ -1,6 +0,0 @@ -[maxamillion-origin-next] -name=Copr repo for origin-next owned by maxamillion -baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ -skip_if_unavailable=False -gpgcheck=0 -enabled=1 diff --git a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/repos/files/online/oso-rhui-rhel-7-extras.repo deleted file mode 100644 index cfe41f691..000000000 --- a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo +++ /dev/null @@ -1,23 +0,0 @@ -[oso-rhui-rhel-server-extras] -name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem - -[oso-rhui-rhel-server-extras-htb] -name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ -enabled=0 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/repos/files/online/oso-rhui-rhel-7-server.repo b/roles/repos/files/online/oso-rhui-rhel-7-server.repo deleted file mode 100644 index ddc93193d..000000000 --- a/roles/repos/files/online/oso-rhui-rhel-7-server.repo +++ /dev/null @@ -1,21 +0,0 @@ -[oso-rhui-rhel-server-releases] -name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem - -[oso-rhui-rhel-server-releases-optional] -name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/repos/files/online/rhel-7-libra-candidate.repo b/roles/repos/files/online/rhel-7-libra-candidate.repo deleted file mode 100644 index b4215679f..000000000 --- a/roles/repos/files/online/rhel-7-libra-candidate.repo +++ /dev/null @@ -1,11 +0,0 @@ -[rhel-7-libra-candidate] -name=rhel-7-libra-candidate - \$basearch -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ - https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ -gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted -skip_if_unavailable=True -gpgcheck=0 -enabled=1 -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem -sslverify=False diff --git a/roles/repos/tasks/main.yaml b/roles/repos/tasks/main.yaml deleted file mode 100644 index 43786da41..000000000 --- a/roles/repos/tasks/main.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -# TODO: Add flag for enabling EPEL repo, default to false - -- assert: - that: openshift_deployment_type in known_openshift_deployment_types - -# TODO: remove this when origin support actually works -- fail: msg="OpenShift Origin support is not currently enabled" - when: openshift_deployment_type == 'origin' - -- name: Create any additional repos that are defined - template: - src: yum_repo.j2 - dest: /etc/yum.repos.d/openshift_additional.repo - when: openshift_additional_repos | length > 0 - -- name: Remove the additional repos if no longer defined - file: - dest: /etc/yum.repos.d/openshift_additional.repo - state: absent - when: openshift_additional_repos | length == 0 - -- name: Remove any yum repo files for other deployment types - file: - path: "/etc/yum.repos.d/{{ item | basename }}" - state: absent - with_fileglob: - - '*/*' - when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$")) - -- name: Configure gpg keys if needed - copy: src={{ item }} dest=/etc/pki/rpm-gpg/ - with_fileglob: - - "{{ openshift_deployment_type }}/*" - when: item | basename | match("RPM-GPG-KEY-") - -- name: Configure yum repositories - copy: src={{ item }} dest=/etc/yum.repos.d/ - with_fileglob: - - "{{ openshift_deployment_type }}/*" - when: item | basename | search(".*\.repo$") diff --git a/roles/repos/templates/yum_repo.j2 b/roles/repos/templates/yum_repo.j2 deleted file mode 100644 index 7ea2c7460..000000000 --- a/roles/repos/templates/yum_repo.j2 +++ /dev/null @@ -1,15 +0,0 @@ -# {{ ansible_managed }} -{% for repo in openshift_additional_repos %} -[{{ repo.id }}] -name={{ repo.name | default(repo.id) }} -baseurl={{ repo.baseurl }} -{% set enable_repo = repo.enabled | default('1') %} -enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} -{% set enable_gpg_check = 
repo.gpgcheck | default('1') %} -gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} -{% for key, value in repo.iteritems() %} -{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %} -{{ key }}={{ value }} -{% endif %} -{% endfor %} -{% endfor %} diff --git a/roles/repos/vars/main.yml b/roles/repos/vars/main.yml deleted file mode 100644 index bbb4c77e7..000000000 --- a/roles/repos/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -known_openshift_deployment_types: ['origin', 'online', 'enterprise'] -- cgit v1.2.3 From 7c7cb82fdd5583784fd5832b92886abf86934325 Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Fri, 6 Mar 2015 13:52:20 -0700 Subject: Use ansible playbook to initialize openshift cluster * Added playbooks/gce/openshift-cluster * Added bin/cluster (will replace cluster.sh) --- roles/docker/tasks/main.yml | 2 +- roles/openshift_common/tasks/main.yml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 2ecefd588..ca700db17 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -11,5 +11,5 @@ # From the origin rpm there exists instructions on how to # setup origin properly. The following steps come from there - name: Change root to be in the Docker group - user: name=root groups=docker append=yes + user: name=root groups=dockerroot append=yes diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 07737a71f..656a3880d 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,6 +2,9 @@ - name: Set hostname hostname: name={{ openshift_hostname }} +- name: Update all packages + yum: name=* state=latest + - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From 66332175b61a5a538aa73b76cbcf151e1882a52c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 13 Mar 2015 03:55:04 -0400 Subject: Move yum update * to new os_update_latest role --- roles/openshift_common/tasks/main.yml | 3 --- roles/os_update_latest/tasks/main.yml | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 roles/os_update_latest/tasks/main.yml (limited to 'roles') diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 656a3880d..07737a71f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,9 +2,6 @@ - name: Set hostname hostname: name={{ openshift_hostname }} -- name: Update all packages - yum: name=* state=latest - - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml new file mode 100644 index 000000000..4a2c3d47a --- /dev/null +++ b/roles/os_update_latest/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Update all packages + yum: name=* state=latest -- cgit v1.2.3 From 3324b6c8889074ee17d7be05588de8b58aa3774f Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Fri, 6 Mar 2015 13:52:20 -0700 Subject: Use ansible playbook to initialize openshift cluster * Added playbooks/gce/openshift-cluster * Added bin/cluster (will replace cluster.sh) --- roles/openshift_common/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) (limited to 'roles') diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 07737a71f..656a3880d 100644 
--- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,6 +2,9 @@ - name: Set hostname hostname: name={{ openshift_hostname }} +- name: Update all packages + yum: name=* state=latest + - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From 6ad94864f7d985f1bb671536bd398ea4bcd0f163 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 16 Mar 2015 15:13:12 -0400 Subject: add repos role to gce cluster launch so that we are applying os_update_latest after repo config --- roles/openshift_common/tasks/main.yml | 3 --- 1 file changed, 3 deletions(-) (limited to 'roles') diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 656a3880d..07737a71f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,9 +2,6 @@ - name: Set hostname hostname: name={{ openshift_hostname }} -- name: Update all packages - yum: name=* state=latest - - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From 85e6948fca954d3c066bf5a6123ada6b96adf45c Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Thu, 19 Mar 2015 15:06:38 -0700 Subject: * Add DOCKER chain to iptables --- roles/os_firewall/tasks/firewall/iptables.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'roles') diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 87e77c083..3d46d6e2d 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,6 +41,20 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 +- name: Check for DOCKER chain + shell: iptables -L |grep '^Chain DOCKER' + ignore_errors: yes + register: check_for_chain + +- name: Create DOCKER chain + command: iptables -N DOCKER + register: create_chain + when: check_for_chain.rc != 0 + +- name: Persist DOCKER chain + command: service iptables save + when: create_chain.rc == 0 + - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3 From 9fb5bbc79a6753c6125e4f3ea007040dad0482ef Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 19 Mar 2015 23:04:21 -0400 Subject: Add verify_chain action to os_firewall_manage_iptables module - Add verify_chain action to os_firewall_manage_iptables module - Update os_firewall module to use os_firewall_manage_iptables for creating the DOCKER chain. --- .../library/os_firewall_manage_iptables.py | 62 ++++++++++++++-------- roles/os_firewall/tasks/firewall/iptables.yml | 20 +++---- 2 files changed, 47 insertions(+), 35 deletions(-) (limited to 'roles') diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index fef710055..6a018d022 100644 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -51,11 +51,13 @@ class IpTablesCreateJumpRuleError(IpTablesError): # exception was thrown later. for example, when the chain is created # successfully, but the add/remove rule fails. 
class IpTablesManager: - def __init__(self, module, ip_version, check_mode, chain): + def __init__(self, module): self.module = module - self.ip_version = ip_version - self.check_mode = check_mode - self.chain = chain + self.ip_version = module.params['ip_version'] + self.check_mode = module.check_mode + self.chain = module.params['chain'] + self.create_jump_rule = module.params['create_jump_rule'] + self.jump_rule_chain = module.params['jump_rule_chain'] self.cmd = self.gen_cmd() self.save_cmd = self.gen_save_cmd() self.output = [] @@ -70,13 +72,16 @@ class IpTablesManager: msg="Failed to save iptables rules", cmd=e.cmd, exit_code=e.returncode, output=e.output) + def verify_chain(self): + if not self.chain_exists(): + self.create_chain() + if self.create_jump_rule and not self.jump_rule_exists(): + self.create_jump() + def add_rule(self, port, proto): rule = self.gen_rule(port, proto) if not self.rule_exists(rule): - if not self.chain_exists(): - self.create_chain() - if not self.jump_rule_exists(): - self.create_jump_rule() + self.verify_chain() if self.check_mode: self.changed = True @@ -121,13 +126,13 @@ class IpTablesManager: return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW', '-m', proto, '--dport', str(port), '-j', 'ACCEPT'] - def create_jump_rule(self): + def create_jump(self): if self.check_mode: self.changed = True self.output.append("Create jump rule for chain %s" % self.chain) else: try: - cmd = self.cmd + ['-L', 'INPUT', '--line-numbers'] + cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers'] output = check_output(cmd, stderr=subprocess.STDOUT) # break the input rules into rows and columns @@ -144,11 +149,11 @@ class IpTablesManager: continue last_rule_target = rule[1] - # Raise an exception if we do not find a valid INPUT rule + # Raise an exception if we do not find a valid rule if not last_rule_num or not last_rule_target: raise IpTablesCreateJumpRuleError( chain=self.chain, - msg="Failed to find existing INPUT rules", + msg="Failed to find existing %s rules" % self.jump_rule_chain, cmd=None, exit_code=None, output=None) # Naively assume that if the last row is a REJECT rule, then @@ -156,19 +161,20 @@ class IpTablesManager: # assume that we can just append the rule. 
if last_rule_target == 'REJECT': # insert rule - cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)] + cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)] else: # append rule - cmd = self.cmd + ['-A', 'INPUT'] + cmd = self.cmd + ['-A', self.jump_rule_chain] cmd += ['-j', self.chain] output = check_output(cmd, stderr=subprocess.STDOUT) changed = True self.output.append(output) + self.save() except subprocess.CalledProcessError as e: if '--line-numbers' in e.cmd: raise IpTablesCreateJumpRuleError( chain=self.chain, - msg="Failed to query existing INPUT rules to " + msg="Failed to query existing %s rules to " % self.jump_rule_chain + "determine jump rule location", cmd=e.cmd, exit_code=e.returncode, output=e.output) @@ -192,6 +198,7 @@ class IpTablesManager: self.changed = True self.output.append("Successfully created chain %s" % self.chain) + self.save() except subprocess.CalledProcessError as e: raise IpTablesCreateChainError( chain=self.chain, @@ -200,7 +207,7 @@ class IpTablesManager: ) def jump_rule_exists(self): - cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain] + cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain] return True if subprocess.call(cmd) == 0 else False def chain_exists(self): @@ -220,9 +227,12 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - action=dict(required=True, choices=['add', 'remove']), - protocol=dict(required=True, choices=['tcp', 'udp']), - port=dict(required=True, type='int'), + action=dict(required=True, choices=['add', 'remove', 'verify_chain']), + chain=dict(required=False, default='OS_FIREWALL_ALLOW'), + create_jump_rule=dict(required=False, type='bool', default=True), + jump_rule_chain=dict(required=False, default='INPUT'), + protocol=dict(required=False, choices=['tcp', 'udp']), + port=dict(required=False, type='int'), ip_version=dict(required=False, default='ipv4', choices=['ipv4', 'ipv6']), ), @@ -232,16 +242,24 @@ def main(): action = module.params['action'] protocol = module.params['protocol'] port = module.params['port'] - ip_version = module.params['ip_version'] - chain = 'OS_FIREWALL_ALLOW' - iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain) + if action in ['add', 'remove']: + if not protocol: + error = "protocol is required when action is %s" % action + module.fail_json(msg=error) + if not port: + error = "port is required when action is %s" % action + module.fail_json(msg=error) + + iptables_manager = IpTablesManager(module) try: if action == 'add': iptables_manager.add_rule(port, protocol) elif action == 'remove': iptables_manager.remove_rule(port, protocol) + elif action == 'verify_chain': + iptables_manager.verify_chain() except IpTablesError as e: module.fail_json(msg=e.msg) diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 3d46d6e2d..72a3401cf 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,19 +41,13 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 -- name: Check for DOCKER chain - shell: iptables -L |grep '^Chain DOCKER' - ignore_errors: yes - register: check_for_chain - -- name: Create DOCKER chain - command: iptables -N DOCKER - register: create_chain - when: check_for_chain.rc != 0 - -- name: Persist DOCKER chain - command: service iptables save - when: create_chain.rc == 0 +# Workaround for Docker 1.4 to create DOCKER chain +- name: Add DOCKER chain + os_firewall_manage_iptables: + 
name: "DOCKER chain" + action: verify_chain + create_jump_rule: no +# End of Docker 1.4 workaround - name: Add iptables allow rules os_firewall_manage_iptables: -- cgit v1.2.3 From 8f35aff7245246de4116fcf3c81e7f095cf1be3a Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:11:22 -0400 Subject: Add new role os_env_extras_node that is a subset of the docker role - Does not install or start docker, since the openshift-node role will handle that for us - Only add root to the dockerroot group and configures the enter-container script. --- roles/os_env_extras_node/README.md | 38 +++++++ roles/os_env_extras_node/files/enter-container.sh | 13 +++ roles/os_env_extras_node/meta/main.yml | 124 ++++++++++++++++++++++ roles/os_env_extras_node/tasks/main.yml | 7 ++ 4 files changed, 182 insertions(+) create mode 100644 roles/os_env_extras_node/README.md create mode 100755 roles/os_env_extras_node/files/enter-container.sh create mode 100644 roles/os_env_extras_node/meta/main.yml create mode 100644 roles/os_env_extras_node/tasks/main.yml (limited to 'roles') diff --git a/roles/os_env_extras_node/README.md b/roles/os_env_extras_node/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/roles/os_env_extras_node/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/roles/os_env_extras_node/files/enter-container.sh b/roles/os_env_extras_node/files/enter-container.sh new file mode 100755 index 000000000..7cf5b8d83 --- /dev/null +++ b/roles/os_env_extras_node/files/enter-container.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [ $# -ne 1 ] +then + echo + echo "Usage: $(basename $0) " + echo + exit 1 +fi + +PID=$(docker inspect --format '{{.State.Pid}}' $1) + +nsenter --target $PID --mount --uts --ipc --net --pid diff --git a/roles/os_env_extras_node/meta/main.yml b/roles/os_env_extras_node/meta/main.yml new file mode 100644 index 000000000..c5c362c60 --- /dev/null +++ b/roles/os_env_extras_node/meta/main.yml @@ -0,0 +1,124 @@ +--- +galaxy_info: + author: your name + description: + company: your company (optional) + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! + # + #platforms: + #- name: EL + # versions: + # - all + # - 5 + # - 6 + # - 7 + #- name: GenericUNIX + # versions: + # - all + # - any + #- name: Fedora + # versions: + # - all + # - 16 + # - 17 + # - 18 + # - 19 + # - 20 + #- name: opensuse + # versions: + # - all + # - 12.1 + # - 12.2 + # - 12.3 + # - 13.1 + # - 13.2 + #- name: Amazon + # versions: + # - all + # - 2013.03 + # - 2013.09 + #- name: GenericBSD + # versions: + # - all + # - any + #- name: FreeBSD + # versions: + # - all + # - 8.0 + # - 8.1 + # - 8.2 + # - 8.3 + # - 8.4 + # - 9.0 + # - 9.1 + # - 9.1 + # - 9.2 + #- name: Ubuntu + # versions: + # - all + # - lucid + # - maverick + # - natty + # - oneiric + # - precise + # - quantal + # - raring + # - saucy + # - trusty + #- name: SLES + # versions: + # - all + # - 10SP3 + # - 10SP4 + # - 11 + # - 11SP1 + # - 11SP2 + # - 11SP3 + #- name: GenericLinux + # versions: + # - all + # - any + #- name: Debian + # versions: + # - all + # - etch + # - lenny + # - squeeze + # - wheezy + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + # + #categories: + #- cloud + #- cloud:ec2 + #- cloud:gce + #- cloud:rax + #- clustering + #- database + #- database:nosql + #- database:sql + #- development + #- monitoring + #- networking + #- packaging + #- system + #- web +dependencies: [] + # List your role dependencies here, one per line. Only + # dependencies available via galaxy should be listed here. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml new file mode 100644 index 000000000..065f71f74 --- /dev/null +++ b/roles/os_env_extras_node/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755 + +# From the origin rpm there exists instructions on how to +# setup origin properly. 
The following steps come from there +- name: Change root to be in the Docker group + user: name=root groups=dockerroot append=yes -- cgit v1.2.3 From 70c5a715debc1c1a900c6dcfe178b36b2a014ab4 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:14:17 -0400 Subject: Use docker as package name instead of docker-io --- roles/docker/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index ca700db17..593c4c877 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,7 +1,7 @@ --- # tasks file for docker - name: Install docker - yum: pkg=docker-io + yum: pkg=docker - name: enable and start the docker service service: name=docker enabled=yes state=started -- cgit v1.2.3 From 8b68846806d5294b5f43d14772d59aa2b8cf5e73 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:43:00 -0400 Subject: remove os_firewall creation of DOCKER chain --- roles/os_firewall/tasks/firewall/iptables.yml | 8 -------- 1 file changed, 8 deletions(-) (limited to 'roles') diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 72a3401cf..87e77c083 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,14 +41,6 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 -# Workaround for Docker 1.4 to create DOCKER chain -- name: Add DOCKER chain - os_firewall_manage_iptables: - name: "DOCKER chain" - action: verify_chain - create_jump_rule: no -# End of Docker 1.4 workaround - - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3 From d67c5b8f79609d2d3b07cc009f58e3dc988782c5 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 16:30:49 -0400 Subject: node registration changes - Remove default value for openshift_hostname and make it required - Remove workarounds that are no longer needed - Remove resources parameter from openshift_register_node module - pre-create node certificates for each node before registering node - distribute created node certificates to each node - Move node registration logic to a new openshift_register_nodes role - This is because we now have to run the steps on a master as opposed to on the nodes like we were previously doing. - Rename openshift_register_node module to kubernetes_register_node, one more step to genericizing enough for upstreaming, however there are still plenty of openshift specific commands that still need to be genericized. 
--- roles/openshift_common/README.md | 2 +- roles/openshift_common/defaults/main.yml | 2 +- roles/openshift_master/README.md | 2 +- roles/openshift_master/tasks/main.yml | 35 +- roles/openshift_node/README.md | 3 +- roles/openshift_node/defaults/main.yml | 6 - .../library/openshift_register_node.py | 390 --------------------- roles/openshift_node/tasks/main.yml | 68 +--- roles/openshift_register_nodes/README.md | 38 ++ roles/openshift_register_nodes/defaults/main.yml | 5 + .../library/kubernetes_register_node.py | 370 +++++++++++++++++++ roles/openshift_register_nodes/meta/main.yml | 128 +++++++ roles/openshift_register_nodes/tasks/main.yml | 71 ++++ roles/openshift_sdn_node/README.md | 2 +- 14 files changed, 641 insertions(+), 481 deletions(-) delete mode 100644 roles/openshift_node/library/openshift_register_node.py create mode 100644 roles/openshift_register_nodes/README.md create mode 100644 roles/openshift_register_nodes/defaults/main.yml create mode 100644 roles/openshift_register_nodes/library/kubernetes_register_node.py create mode 100644 roles/openshift_register_nodes/meta/main.yml create mode 100644 roles/openshift_register_nodes/tasks/main.yml (limited to 'roles') diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index fce79047c..592a276f9 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -16,7 +16,7 @@ Role Variables |-------------------------------|------------------------------|----------------------------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | Workaround needed to set hostname to IP address | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_env | default | Envrionment name if multiple OpenShift instances | diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index eb6edbc03..86351f6f6 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -4,4 +4,4 @@ openshift_debug_level: 0 # TODO: Once openshift stops resolving hostnames for node queries remove # this... 
openshift_hostname_workaround: true -openshift_hostname: "{{ ansible_default_ipv4.address if openshift_hostname_workaround else ansible_fqdn }}" + diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 5a1b889b2..2f03b4990 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -27,7 +27,7 @@ From openshift_common: | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | Dependencies ------------ diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index d5f4776dc..52f5f694c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -1,4 +1,8 @@ --- +# TODO: allow for overriding default ports where possible +# TODO: if setting up multiple masters, will need to predistribute the certs +# to the additional masters before starting openshift-master + - name: Install OpenShift Master package yum: pkg=openshift-master state=installed @@ -6,9 +10,7 @@ lineinfile: dest: /etc/sysconfig/openshift-master regexp: '^OPTIONS=' - line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if - openshift_node_ips %} --nodes={{ openshift_node_ips - | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\"" + line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\"" notify: - restart openshift-master @@ -34,42 +36,15 @@ option: externally_managed value: "{{ openshift_master_manage_service_externally }}" -# TODO: remove this when origin PR #1298 has landed in OSE -- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal - command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/ - args: - creates: /etc/systemd/system/openshift-master.service -- ini_file: - dest: /etc/systemd/system/openshift-master.service - option: TimeoutStartSec - section: Service - value: 300 - state: present - register: result -- command: systemctl daemon-reload - when: result | changed -# End of workaround pending PR #1298 - - name: Start and enable openshift-master service: name=openshift-master enabled=yes state=started when: not openshift_master_manage_service_externally register: result -#TODO: remove this when origin PR #1204 has landed in OSE -- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated - pause: seconds=30 - when: result | changed -# End of workaround pending PR #1204 - - name: Disable openshift-master if openshift-master is managed externally service: name=openshift-master enabled=false when: openshift_master_manage_service_externally -# TODO: create an os_vars role that has generic env related config and move -# the root kubeconfig setting there, cannot use dependencies to force ordering -# with openshift_node and openshift_master because the way conditional -# dependencies work with current ansible would also exclude the -# openshift_common dependency. 
- name: Create .kube directory file: path: /root/.kube diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 9210bab16..d537a35a5 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -21,7 +21,6 @@ From this role: | openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts | | openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication | | openshift_registry_url | UNDEF (Optional) | Default docker registry to use | -| openshift_node_resources | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory | From openshift_common: | Name | Default Value | | @@ -29,7 +28,7 @@ From openshift_common: | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | Dependencies ------------ diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index e4d5ebfee..6dc73a96e 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -4,9 +4,3 @@ openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}" os_firewall_allow: - service: OpenShift kubelet port: 10250/tcp -openshift_node_resources: - cpu: - memory: - cidr: -openshift_node_labels: {} -openshift_node_annotations: {} diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py deleted file mode 100644 index 4922585d7..000000000 --- a/roles/openshift_node/library/openshift_register_node.py +++ /dev/null @@ -1,390 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - -import os -import multiprocessing -import socket -from subprocess import check_output, Popen -from decimal import * - -DOCUMENTATION = ''' ---- -module: kubernetes_register_node -short_description: Registers a kubernetes node with a master -description: - - Registers a kubernetes node with a master -options: - name: - default: null - description: - - Identifier for this node (usually the node fqdn). - required: true - api_verison: - choices: ['v1beta1', 'v1beta3'] - default: 'v1beta1' - description: - - Kubernetes API version to use - required: true - host_ip: - default: null - description: - - IP Address to associate with the node when registering. - Available in the following API versions: v1beta1. - required: false - hostnames: - default: [] - description: - - Valid hostnames for this node. Available in the following API - versions: v1beta3. - required: false - external_ips: - default: [] - description: - - External IP Addresses for this node. Available in the following API - versions: v1beta3. - required: false - internal_ips: - default: [] - description: - - Internal IP Addresses for this node. Available in the following API - versions: v1beta3. - required: false - cpu: - default: null - description: - - Number of CPUs to allocate for this node. 
If not provided, then - the node will be registered to advertise the number of logical - CPUs available. When using the v1beta1 API, you must specify the - CPU count as a floating point number with no more than 3 decimal - places. API version v1beta3 and newer accepts arbitrary float - values. - required: false - memory: - default: null - description: - - Memory available for this node. If not provided, then the node - will be registered to advertise 80% of MemTotal as available - memory. When using the v1beta1 API, you must specify the memory - size in bytes. API version v1beta3 and newer accepts binary SI - and decimal SI values. - required: false -''' -EXAMPLES = ''' -# Minimal node registration -- openshift_register_node: name=ose3.node.example.com - -# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of -# Memory -- openshift_register_node: - name: ose3.node.example.com - api_version: v1beta1 - hostIP: 192.168.1.1 - cpu: 1 - memory: 500000000 - -# Node registration using the v1beta3 API, setting an alternate hostname, -# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory -- openshift_register_node: - name: ose3.node.example.com - api_version: v1beta3 - external_ips: ['192.168.1.5'] - internal_ips: ['10.0.0.5'] - hostnames: ['ose2.node.internal.local'] - cpu: 3.5 - memory: 1Ti -''' - - -class ClientConfigException(Exception): - pass - -class ClientConfig: - def __init__(self, client_opts, module): - _, output, error = module.run_command(["/usr/bin/openshift", "ex", - "config", "view", "-o", - "json"] + client_opts, - check_rc = True) - self.config = json.loads(output) - - if not (bool(self.config['clusters']) or - bool(self.config['contexts']) or - bool(self.config['current-context']) or - bool(self.config['users'])): - raise ClientConfigException(msg="Client config missing required " \ - "values", - output=output) - - def current_context(self): - return self.config['current-context'] - - def section_has_value(self, section_name, value): - section = self.config[section_name] - if isinstance(section, dict): - return value in section - else: - val = next((item for item in section - if item['name'] == value), None) - return val is not None - - def has_context(self, context): - return self.section_has_value('contexts', context) - - def has_user(self, user): - return self.section_has_value('users', user) - - def has_cluster(self, cluster): - return self.section_has_value('clusters', cluster) - - def get_value_for_context(self, context, attribute): - contexts = self.config['contexts'] - if isinstance(contexts, dict): - return contexts[context][attribute] - else: - return next((c['context'][attribute] for c in contexts - if c['name'] == context), None) - - def get_user_for_context(self, context): - return self.get_value_for_context(context, 'user') - - def get_cluster_for_context(self, context): - return self.get_value_for_context(context, 'cluster') - -class Util: - @staticmethod - def getLogicalCores(): - return multiprocessing.cpu_count() - - @staticmethod - def getMemoryPct(pct): - with open('/proc/meminfo', 'r') as mem: - for line in mem: - entries = line.split() - if str(entries.pop(0)) == 'MemTotal:': - mem_total_kb = Decimal(entries.pop(0)) - mem_capacity_kb = mem_total_kb * Decimal(pct) - return str(mem_capacity_kb.to_integral_value() * 1024) - - return "" - - @staticmethod - def remove_empty_elements(mapping): - if isinstance(mapping, dict): - m = mapping.copy() - for key, val in mapping.iteritems(): - if not val: - del m[key] - return m 
- else: - return mapping - -class NodeResources: - def __init__(self, version, cpu=None, memory=None): - if version == 'v1beta1': - self.resources = dict(capacity=dict()) - self.resources['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores() - self.resources['capacity']['memory'] = memory if cpu else Util.getMemoryPct(.75) - - def get_resources(self): - return Util.remove_empty_elements(self.resources) - -class NodeSpec: - def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None): - if version == 'v1beta3': - self.spec = dict(podCIDR=cidr, externalID=externalID, - capacity=dict()) - self.spec['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores() - self.spec['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75) - - def get_spec(self): - return Util.remove_empty_elements(self.spec) - -class NodeStatus: - def addAddresses(self, addressType, addresses): - addressList = [] - for address in addresses: - addressList.append(dict(type=addressType, address=address)) - return addressList - - def __init__(self, version, externalIPs = [], internalIPs = [], - hostnames = []): - if version == 'v1beta3': - self.status = dict(addresses = addAddresses('ExternalIP', - externalIPs) + - addAddresses('InternalIP', - internalIPs) + - addAddresses('Hostname', - hostnames)) - - def get_status(self): - return Util.remove_empty_elements(self.status) - -class Node: - def __init__(self, module, client_opts, version='v1beta1', name=None, - hostIP = None, hostnames=[], externalIPs=[], internalIPs=[], - cpu=None, memory=None, labels=dict(), annotations=dict(), - podCIDR=None, externalID=None): - self.module = module - self.client_opts = client_opts - if version == 'v1beta1': - self.node = dict(id = name, - kind = 'Node', - apiVersion = version, - hostIP = hostIP, - resources = NodeResources(version, cpu, memory), - cidr = podCIDR, - labels = labels, - annotations = annotations - ) - elif version == 'v1beta3': - metadata = dict(name = name, - labels = labels, - annotations = annotations - ) - self.node = dict(kind = 'Node', - apiVersion = version, - metadata = metadata, - spec = NodeSpec(version, cpu, memory, podCIDR, - externalID), - status = NodeStatus(version, externalIPs, - internalIPs, hostnames), - ) - - def get_name(self): - if self.node['apiVersion'] == 'v1beta1': - return self.node['id'] - elif self.node['apiVersion'] == 'v1beta3': - return self.node['name'] - - def get_node(self): - node = self.node.copy() - if self.node['apiVersion'] == 'v1beta1': - node['resources'] = self.node['resources'].get_resources() - elif self.node['apiVersion'] == 'v1beta3': - node['spec'] = self.node['spec'].get_spec() - node['status'] = self.node['status'].get_status() - return Util.remove_empty_elements(node) - - def exists(self): - _, output, error = self.module.run_command(["/usr/bin/osc", "get", - "nodes"] + self.client_opts, - check_rc = True) - if re.search(self.module.params['name'], output, re.MULTILINE): - return True - return False - - def create(self): - cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] - rc, output, error = self.module.run_command(cmd, - data=self.module.jsonify(self.get_node())) - if rc != 0: - if re.search("minion \"%s\" already exists" % self.get_name(), - error): - self.module.exit_json(changed=False, - msg="node definition already exists", - node=self.get_node()) - else: - self.module.fail_json(msg="Node creation failed.", rc=rc, - output=output, error=error, - node=self.get_node()) - else: - return True - -def main(): - module 
= AnsibleModule( - argument_spec = dict( - name = dict(required = True, type = 'str'), - host_ip = dict(type = 'str'), - hostnames = dict(type = 'list', default = []), - external_ips = dict(type = 'list', default = []), - internal_ips = dict(type = 'list', default = []), - api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 - choices = ['v1beta1', 'v1beta3']), - cpu = dict(type = 'str'), - memory = dict(type = 'str'), - labels = dict(type = 'dict', default = {}), # TODO: needs documented - annotations = dict(type = 'dict', default = {}), # TODO: needs documented - pod_cidr = dict(type = 'str'), # TODO: needs documented - external_id = dict(type = 'str'), # TODO: needs documented - client_config = dict(type = 'str'), # TODO: needs documented - client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented - client_context = dict(type = 'str', default = 'master'), # TODO: needs documented - client_user = dict(type = 'str', default = 'admin') # TODO: needs documented - ), - mutually_exclusive = [ - ['host_ip', 'external_ips'], - ['host_ip', 'internal_ips'], - ['host_ip', 'hostnames'], - ], - supports_check_mode=True - ) - - user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig')) - if not (user_has_client_config or module.params['client_config']): - module.fail_json(msg="Could not locate client configuration, " - "client_config must be specified if " - "~/.kube/.kubeconfig is not present") - - client_opts = [] - if module.params['client_config']: - client_opts.append("--kubeconfig=%s" % module.params['client_config']) - - try: - config = ClientConfig(client_opts, module) - except ClientConfigException as e: - module.fail_json(msg="Failed to get client configuration", exception=e) - - client_context = module.params['client_context'] - if config.has_context(client_context): - if client_context != config.current_context(): - client_opts.append("--context=%s" % client_context) - else: - module.fail_json(msg="Context %s not found in client config" % - client_context) - - client_user = module.params['client_user'] - if config.has_user(client_user): - if client_user != config.get_user_for_context(client_context): - client_opts.append("--user=%s" % client_user) - else: - module.fail_json(msg="User %s not found in client config" % - client_user) - - client_cluster = module.params['client_cluster'] - if config.has_cluster(client_cluster): - if client_cluster != config.get_cluster_for_context(client_cluster): - client_opts.append("--cluster=%s" % client_cluster) - else: - module.fail_json(msg="Cluster %s not found in client config" % - client_cluster) - - # TODO: provide sane defaults for some (like hostname, externalIP, - # internalIP, etc) - node = Node(module, client_opts, module.params['api_version'], - module.params['name'], module.params['host_ip'], - module.params['hostnames'], module.params['external_ips'], - module.params['internal_ips'], module.params['cpu'], - module.params['memory'], module.params['labels'], - module.params['annotations'], module.params['pod_cidr'], - module.params['external_id']) - - # TODO: attempt to support changing node settings where possible and/or - # modifying node resources - if node.exists(): - module.exit_json(changed=False, node=node.get_node()) - elif module.check_mode: - module.exit_json(changed=True, node=node.get_node()) - else: - if node.create(): - module.exit_json(changed=True, - msg="Node created successfully", - node=node.get_node()) - else: - 
module.fail_json(msg="Unknown error creating node", - node=node.get_node()) - - -# import module snippets -from ansible.module_utils.basic import * -if __name__ == '__main__': - main() diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e380ba1fb..c039e3f05 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,27 +1,29 @@ --- +- name: Test if node certs and config exist + stat: path={{ item }} + failed_when: not result.stat.exists + register: result + with_items: + - "{{ cert_path }}" + - "{{ cert_path }}/cert.crt" + - "{{ cert_path }}/key.key" + - "{{ cert_path }}/.kubeconfig" + - "{{ cert_path }}/server.crt" + - "{{ cert_path }}/server.key" + - "{{ cert_parent_path }}/ca/cert.crt" + #- "{{ cert_path }}/node.yaml" + - name: Install OpenShift Node package yum: pkg=openshift-node state=installed -- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - -- name: Retrieve OpenShift Master credentials - local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }} - ignore_errors: yes - -- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory - -- name: Store OpenShift Master credentials - local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin - ignore_errors: yes - -- local_action: file name={{ mktemp.stdout }} state=absent - +# --create-certs=false is a temporary workaround until +# https://github.com/openshift/origin/pull/1361 is merged upstream and it is +# the default for nodes - name: Configure OpenShift Node settings lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }}\"" + line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\"" notify: - restart openshift-node @@ -47,42 +49,10 @@ option: externally_managed value: "{{ openshift_node_manage_service_externally }}" -# fixme: Once the openshift_cluster playbook is published state should be started -# Always bounce service to pick up new credentials - name: Start and enable openshift-node - service: name=openshift-node enabled=yes state=restarted + service: name=openshift-node enabled=yes state=started when: not openshift_node_manage_service_externally - name: Disable openshift-node if openshift-node is managed externally service: name=openshift-node enabled=false when: openshift_node_manage_service_externally - -# TODO: create an os_vars role that has generic env related config and move -# the root kubeconfig setting there, cannot use dependencies to force ordering -# with openshift_node and openshift_master because the way conditional -# dependencies work with current ansible would also exclude the -# openshift_common dependency. 
-- name: Create .kube directory - file: - path: /root/.kube - state: directory - mode: 0700 -- name: Configure root user kubeconfig - command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig - args: - creates: /root/.kube/.kubeconfig - -- name: Register node (if not already registered) - openshift_register_node: - name: "{{ openshift_hostname }}" - api_version: v1beta1 - cpu: "{{ openshift_node_resources.cpu }}" - memory: "{{ openshift_node_resources.memory }}" - pod_cidr: "{{ openshift_node_resources.cidr }}" - host_ip: "{{ ansible_default_ipv4.address }}" - labels: "{{ openshift_node_labels }}" - annotations: "{{ openshift_node_annotations }}" - # TODO: support customizing other attributes such as: client_config, - # client_cluster, client_context, client_user - # TODO: updated for v1beta3 changes after rebase: hostnames, external_ips, - # internal_ips, external_id diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/roles/openshift_register_nodes/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml new file mode 100644 index 000000000..3501e8922 --- /dev/null +++ b/roles/openshift_register_nodes/defaults/main.yml @@ -0,0 +1,5 @@ +--- +openshift_kube_api_version: v1beta1 +openshift_cert_dir: openshift.local.certificates +openshift_cert_dir_parent: /var/lib/openshift +openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}" diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py new file mode 100644 index 000000000..409215616 --- /dev/null +++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py @@ -0,0 +1,370 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +import os +import multiprocessing +import socket +from subprocess import check_output, Popen +from decimal import * + +DOCUMENTATION = ''' +--- +module: kubernetes_register_node +short_description: Registers a kubernetes node with a master +description: + - Registers a kubernetes node with a master +options: + name: + default: null + description: + - Identifier for this node (usually the node fqdn). + required: true + api_verison: + choices: ['v1beta1', 'v1beta3'] + default: 'v1beta1' + description: + - Kubernetes API version to use + required: true + host_ip: + default: null + description: + - IP Address to associate with the node when registering. + Available in the following API versions: v1beta1. + required: false + hostnames: + default: [] + description: + - Valid hostnames for this node. Available in the following API + versions: v1beta3. + required: false + external_ips: + default: [] + description: + - External IP Addresses for this node. Available in the following API + versions: v1beta3. + required: false + internal_ips: + default: [] + description: + - Internal IP Addresses for this node. Available in the following API + versions: v1beta3. + required: false + cpu: + default: null + description: + - Number of CPUs to allocate for this node. When using the v1beta1 + API, you must specify the CPU count as a floating point number + with no more than 3 decimal places. API version v1beta3 and newer + accepts arbitrary float values. + required: false + memory: + default: null + description: + - Memory available for this node. When using the v1beta1 API, you + must specify the memory size in bytes. API version v1beta3 and + newer accepts binary SI and decimal SI values. 
+ required: false +''' +EXAMPLES = ''' +# Minimal node registration +- openshift_register_node: name=ose3.node.example.com + +# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of +# Memory +- openshift_register_node: + name: ose3.node.example.com + api_version: v1beta1 + hostIP: 192.168.1.1 + cpu: 1 + memory: 500000000 + +# Node registration using the v1beta3 API, setting an alternate hostname, +# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory +- openshift_register_node: + name: ose3.node.example.com + api_version: v1beta3 + external_ips: ['192.168.1.5'] + internal_ips: ['10.0.0.5'] + hostnames: ['ose2.node.internal.local'] + cpu: 3.5 + memory: 1Ti +''' + + +class ClientConfigException(Exception): + pass + +class ClientConfig: + def __init__(self, client_opts, module): + _, output, error = module.run_command(["/usr/bin/openshift", "ex", + "config", "view", "-o", + "json"] + client_opts, + check_rc = True) + self.config = json.loads(output) + + if not (bool(self.config['clusters']) or + bool(self.config['contexts']) or + bool(self.config['current-context']) or + bool(self.config['users'])): + raise ClientConfigException(msg="Client config missing required " \ + "values", + output=output) + + def current_context(self): + return self.config['current-context'] + + def section_has_value(self, section_name, value): + section = self.config[section_name] + if isinstance(section, dict): + return value in section + else: + val = next((item for item in section + if item['name'] == value), None) + return val is not None + + def has_context(self, context): + return self.section_has_value('contexts', context) + + def has_user(self, user): + return self.section_has_value('users', user) + + def has_cluster(self, cluster): + return self.section_has_value('clusters', cluster) + + def get_value_for_context(self, context, attribute): + contexts = self.config['contexts'] + if isinstance(contexts, dict): + return contexts[context][attribute] + else: + return next((c['context'][attribute] for c in contexts + if c['name'] == context), None) + + def get_user_for_context(self, context): + return self.get_value_for_context(context, 'user') + + def get_cluster_for_context(self, context): + return self.get_value_for_context(context, 'cluster') + +class Util: + @staticmethod + def remove_empty_elements(mapping): + if isinstance(mapping, dict): + m = mapping.copy() + for key, val in mapping.iteritems(): + if not val: + del m[key] + return m + else: + return mapping + +class NodeResources: + def __init__(self, version, cpu=None, memory=None): + if version == 'v1beta1': + self.resources = dict(capacity=dict()) + self.resources['capacity']['cpu'] = cpu + self.resources['capacity']['memory'] = memory + + def get_resources(self): + return Util.remove_empty_elements(self.resources) + +class NodeSpec: + def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None): + if version == 'v1beta3': + self.spec = dict(podCIDR=cidr, externalID=externalID, + capacity=dict()) + self.spec['capacity']['cpu'] = cpu + self.spec['capacity']['memory'] = memory + + def get_spec(self): + return Util.remove_empty_elements(self.spec) + +class NodeStatus: + def addAddresses(self, addressType, addresses): + addressList = [] + for address in addresses: + addressList.append(dict(type=addressType, address=address)) + return addressList + + def __init__(self, version, externalIPs = [], internalIPs = [], + hostnames = []): + if version == 'v1beta3': + self.status = dict(addresses = 
addAddresses('ExternalIP', + externalIPs) + + addAddresses('InternalIP', + internalIPs) + + addAddresses('Hostname', + hostnames)) + + def get_status(self): + return Util.remove_empty_elements(self.status) + +class Node: + def __init__(self, module, client_opts, version='v1beta1', name=None, + hostIP = None, hostnames=[], externalIPs=[], internalIPs=[], + cpu=None, memory=None, labels=dict(), annotations=dict(), + podCIDR=None, externalID=None): + self.module = module + self.client_opts = client_opts + if version == 'v1beta1': + self.node = dict(id = name, + kind = 'Node', + apiVersion = version, + hostIP = hostIP, + resources = NodeResources(version, cpu, memory), + cidr = podCIDR, + labels = labels, + annotations = annotations + ) + elif version == 'v1beta3': + metadata = dict(name = name, + labels = labels, + annotations = annotations + ) + self.node = dict(kind = 'Node', + apiVersion = version, + metadata = metadata, + spec = NodeSpec(version, cpu, memory, podCIDR, + externalID), + status = NodeStatus(version, externalIPs, + internalIPs, hostnames), + ) + + def get_name(self): + if self.node['apiVersion'] == 'v1beta1': + return self.node['id'] + elif self.node['apiVersion'] == 'v1beta3': + return self.node['name'] + + def get_node(self): + node = self.node.copy() + if self.node['apiVersion'] == 'v1beta1': + node['resources'] = self.node['resources'].get_resources() + elif self.node['apiVersion'] == 'v1beta3': + node['spec'] = self.node['spec'].get_spec() + node['status'] = self.node['status'].get_status() + return Util.remove_empty_elements(node) + + def exists(self): + _, output, error = self.module.run_command(["/usr/bin/osc", "get", + "nodes"] + self.client_opts, + check_rc = True) + if re.search(self.module.params['name'], output, re.MULTILINE): + return True + return False + + def create(self): + cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] + rc, output, error = self.module.run_command(cmd, + data=self.module.jsonify(self.get_node())) + if rc != 0: + if re.search("minion \"%s\" already exists" % self.get_name(), + error): + self.module.exit_json(changed=False, + msg="node definition already exists", + node=self.get_node()) + else: + self.module.fail_json(msg="Node creation failed.", rc=rc, + output=output, error=error, + node=self.get_node()) + else: + return True + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required = True, type = 'str'), + host_ip = dict(type = 'str'), + hostnames = dict(type = 'list', default = []), + external_ips = dict(type = 'list', default = []), + internal_ips = dict(type = 'list', default = []), + api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 + choices = ['v1beta1', 'v1beta3']), + cpu = dict(type = 'str'), + memory = dict(type = 'str'), + labels = dict(type = 'dict', default = {}), # TODO: needs documented + annotations = dict(type = 'dict', default = {}), # TODO: needs documented + pod_cidr = dict(type = 'str'), # TODO: needs documented + external_id = dict(type = 'str'), # TODO: needs documented + client_config = dict(type = 'str'), # TODO: needs documented + client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented + client_context = dict(type = 'str', default = 'master'), # TODO: needs documented + client_user = dict(type = 'str', default = 'admin') # TODO: needs documented + ), + mutually_exclusive = [ + ['host_ip', 'external_ips'], + ['host_ip', 'internal_ips'], + ['host_ip', 'hostnames'], + ], + 
supports_check_mode=True + ) + + user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig')) + if not (user_has_client_config or module.params['client_config']): + module.fail_json(msg="Could not locate client configuration, " + "client_config must be specified if " + "~/.kube/.kubeconfig is not present") + + client_opts = [] + if module.params['client_config']: + client_opts.append("--kubeconfig=%s" % module.params['client_config']) + + try: + config = ClientConfig(client_opts, module) + except ClientConfigException as e: + module.fail_json(msg="Failed to get client configuration", exception=e) + + client_context = module.params['client_context'] + if config.has_context(client_context): + if client_context != config.current_context(): + client_opts.append("--context=%s" % client_context) + else: + module.fail_json(msg="Context %s not found in client config" % + client_context) + + client_user = module.params['client_user'] + if config.has_user(client_user): + if client_user != config.get_user_for_context(client_context): + client_opts.append("--user=%s" % client_user) + else: + module.fail_json(msg="User %s not found in client config" % + client_user) + + client_cluster = module.params['client_cluster'] + if config.has_cluster(client_cluster): + if client_cluster != config.get_cluster_for_context(client_cluster): + client_opts.append("--cluster=%s" % client_cluster) + else: + module.fail_json(msg="Cluster %s not found in client config" % + client_cluster) + + # TODO: provide sane defaults for some (like hostname, externalIP, + # internalIP, etc) + node = Node(module, client_opts, module.params['api_version'], + module.params['name'], module.params['host_ip'], + module.params['hostnames'], module.params['external_ips'], + module.params['internal_ips'], module.params['cpu'], + module.params['memory'], module.params['labels'], + module.params['annotations'], module.params['pod_cidr'], + module.params['external_id']) + + # TODO: attempt to support changing node settings where possible and/or + # modifying node resources + if node.exists(): + module.exit_json(changed=False, node=node.get_node()) + elif module.check_mode: + module.exit_json(changed=True, node=node.get_node()) + else: + if node.create(): + module.exit_json(changed=True, + msg="Node created successfully", + node=node.get_node()) + else: + module.fail_json(msg="Unknown error creating node", + node=node.get_node()) + + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml new file mode 100644 index 000000000..7b1f0ef0a --- /dev/null +++ b/roles/openshift_register_nodes/meta/main.yml @@ -0,0 +1,128 @@ +--- +galaxy_info: + author: your name + description: + company: your company (optional) + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! 
+ # + #platforms: + #- name: EL + # versions: + # - all + # - 5 + # - 6 + # - 7 + #- name: GenericUNIX + # versions: + # - all + # - any + #- name: Fedora + # versions: + # - all + # - 16 + # - 17 + # - 18 + # - 19 + # - 20 + #- name: SmartOS + # versions: + # - all + # - any + #- name: opensuse + # versions: + # - all + # - 12.1 + # - 12.2 + # - 12.3 + # - 13.1 + # - 13.2 + #- name: Amazon + # versions: + # - all + # - 2013.03 + # - 2013.09 + #- name: GenericBSD + # versions: + # - all + # - any + #- name: FreeBSD + # versions: + # - all + # - 8.0 + # - 8.1 + # - 8.2 + # - 8.3 + # - 8.4 + # - 9.0 + # - 9.1 + # - 9.1 + # - 9.2 + #- name: Ubuntu + # versions: + # - all + # - lucid + # - maverick + # - natty + # - oneiric + # - precise + # - quantal + # - raring + # - saucy + # - trusty + #- name: SLES + # versions: + # - all + # - 10SP3 + # - 10SP4 + # - 11 + # - 11SP1 + # - 11SP2 + # - 11SP3 + #- name: GenericLinux + # versions: + # - all + # - any + #- name: Debian + # versions: + # - all + # - etch + # - lenny + # - squeeze + # - wheezy + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + # + #categories: + #- cloud + #- cloud:ec2 + #- cloud:gce + #- cloud:rax + #- clustering + #- database + #- database:nosql + #- database:sql + #- development + #- monitoring + #- networking + #- packaging + #- system + #- web +dependencies: [] + # List your role dependencies here, one per line. Only + # dependencies available via galaxy should be listed here. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml new file mode 100644 index 000000000..59216fc87 --- /dev/null +++ b/roles/openshift_register_nodes/tasks/main.yml @@ -0,0 +1,71 @@ +--- +# TODO: support configuration for multiple masters, currently hardcoding +# the info from the first master + +# TODO: create a failed_when condition +- name: Create node server certificates + command: > + /usr/bin/openshift admin create-server-cert + --overwrite=false + --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key + --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }} + args: + chdir: "{{ openshift_cert_dir_parent }}" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt" + with_items: openshift_nodes + register: server_cert_result + +# TODO: create a failed_when condition +- name: Create node client certificates + command: > + /usr/bin/openshift admin create-node-cert + --overwrite=false + --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key + --node-name={{ item.openshift_node_hostname }} + args: + chdir: "{{ openshift_cert_dir_parent }}" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt" + with_items: openshift_nodes + register: node_cert_result + +# TODO: re-create kubeconfig if certs were regenerated, not just if +# .kubeconfig doesn't exist +# TODO: create a failed_when condition +- name: Create kubeconfigs for nodes + command: > + /usr/bin/openshift admin create-kubeconfig + --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt + 
--client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key + --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig + --master={{ openshift_master_urls[0] }} + --public-master={{ openshift_master_public_urls[0] }} + args: + chdir: "{{ openshift_cert_dir_parent }}" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig" + with_items: openshift_nodes + register: kubeconfig_result + +# TODO: generate the node configs (openshift start node --write-config +# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml' +# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig' +# will need to modify the generated node config as needed +# (servingInfo.{certFile,clientCA,keyFile}) + +- name: Register unregistered nodes + kubernetes_register_node: + name: "{{ item.openshift_node_name }}" + api_version: "{{ openshift_kube_api_version }}" + cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}" + memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}" + pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}" + host_ip: "{{ item.openshift_node_host_ip }}" + labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}" + annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}" + # TODO: support customizing other attributes such as: client_config, + # client_cluster, client_context, client_user + # TODO: update for v1beta3 changes after rebase: hostnames, external_ips, + # internal_ips, external_id + with_items: openshift_nodes + register: register_result diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md index 294550219..33197c241 100644 --- a/roles/openshift_sdn_node/README.md +++ b/roles/openshift_sdn_node/README.md @@ -29,7 +29,7 @@ From openshift_common: | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | Dependencies ------------ -- cgit v1.2.3 From de1391db4309f020b5c8467597eef527b560bbaa Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 23:36:08 -0400 Subject: remove openshift_hostname_workaround var for openshift_common, rather rely on inventory/playbook variables for openshift_hostname --- roles/openshift_common/README.md | 1 - roles/openshift_common/defaults/main.yml | 5 ----- roles/openshift_master/README.md | 1 - roles/openshift_node/README.md | 1 - roles/openshift_sdn_node/README.md | 1 - 5 files changed, 9 deletions(-) (limited to 'roles') diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index 592a276f9..880d66e2c 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -15,7 +15,6 @@ Role Variables | Name | Default value | | |-------------------------------|------------------------------|----------------------------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address | | openshift_hostname | UNDEF (Required) | 
hostname to use for this instance | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_env | default | Envrionment name if multiple OpenShift instances | diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index 86351f6f6..22b2c6ffd 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -1,7 +1,2 @@ --- openshift_debug_level: 0 - -# TODO: Once openshift stops resolving hostnames for node queries remove -# this... -openshift_hostname_workaround: true - diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 2f03b4990..2d898bc3b 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -25,7 +25,6 @@ From openshift_common: | Name | Default Value | | |-------------------------------|---------------------|---------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index d537a35a5..c9b4eab34 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -26,7 +26,6 @@ From openshift_common: | Name | Default Value | | |-------------------------------|---------------------|---------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md index 33197c241..2da2d74eb 100644 --- a/roles/openshift_sdn_node/README.md +++ b/roles/openshift_sdn_node/README.md @@ -27,7 +27,6 @@ From openshift_common: | Name | Default value | | |-------------------------------|---------------------|----------------------------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | -- cgit v1.2.3 From 41740bc6e177e58a0aa817e2d940e60be51d3bfe Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Tue, 24 Mar 2015 09:43:36 -0700 Subject: Revert "Jwhonce wip/cluster" --- roles/docker/tasks/main.yml | 2 +- roles/os_env_extras_node/README.md | 38 ------- roles/os_env_extras_node/files/enter-container.sh | 13 --- roles/os_env_extras_node/meta/main.yml | 124 ---------------------- roles/os_env_extras_node/tasks/main.yml | 7 -- roles/os_firewall/tasks/firewall/iptables.yml | 8 ++ 6 files changed, 9 insertions(+), 183 deletions(-) delete mode 100644 roles/os_env_extras_node/README.md delete mode 100755 roles/os_env_extras_node/files/enter-container.sh delete mode 100644 roles/os_env_extras_node/meta/main.yml delete mode 100644 roles/os_env_extras_node/tasks/main.yml (limited to 'roles') diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 593c4c877..ca700db17 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,7 +1,7 @@ --- # tasks file for docker - name: Install docker - yum: pkg=docker + yum: pkg=docker-io - name: enable 
and start the docker service service: name=docker enabled=yes state=started diff --git a/roles/os_env_extras_node/README.md b/roles/os_env_extras_node/README.md deleted file mode 100644 index 225dd44b9..000000000 --- a/roles/os_env_extras_node/README.md +++ /dev/null @@ -1,38 +0,0 @@ -Role Name -========= - -A brief description of the role goes here. - -Requirements ------------- - -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. - -Role Variables --------------- - -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. - -Dependencies ------------- - -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. - -Example Playbook ----------------- - -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } - -License -------- - -BSD - -Author Information ------------------- - -An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/roles/os_env_extras_node/files/enter-container.sh b/roles/os_env_extras_node/files/enter-container.sh deleted file mode 100755 index 7cf5b8d83..000000000 --- a/roles/os_env_extras_node/files/enter-container.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -if [ $# -ne 1 ] -then - echo - echo "Usage: $(basename $0) " - echo - exit 1 -fi - -PID=$(docker inspect --format '{{.State.Pid}}' $1) - -nsenter --target $PID --mount --uts --ipc --net --pid diff --git a/roles/os_env_extras_node/meta/main.yml b/roles/os_env_extras_node/meta/main.yml deleted file mode 100644 index c5c362c60..000000000 --- a/roles/os_env_extras_node/meta/main.yml +++ /dev/null @@ -1,124 +0,0 @@ ---- -galaxy_info: - author: your name - description: - company: your company (optional) - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - min_ansible_version: 1.2 - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! 
- # - #platforms: - #- name: EL - # versions: - # - all - # - 5 - # - 6 - # - 7 - #- name: GenericUNIX - # versions: - # - all - # - any - #- name: Fedora - # versions: - # - all - # - 16 - # - 17 - # - 18 - # - 19 - # - 20 - #- name: opensuse - # versions: - # - all - # - 12.1 - # - 12.2 - # - 12.3 - # - 13.1 - # - 13.2 - #- name: Amazon - # versions: - # - all - # - 2013.03 - # - 2013.09 - #- name: GenericBSD - # versions: - # - all - # - any - #- name: FreeBSD - # versions: - # - all - # - 8.0 - # - 8.1 - # - 8.2 - # - 8.3 - # - 8.4 - # - 9.0 - # - 9.1 - # - 9.1 - # - 9.2 - #- name: Ubuntu - # versions: - # - all - # - lucid - # - maverick - # - natty - # - oneiric - # - precise - # - quantal - # - raring - # - saucy - # - trusty - #- name: SLES - # versions: - # - all - # - 10SP3 - # - 10SP4 - # - 11 - # - 11SP1 - # - 11SP2 - # - 11SP3 - #- name: GenericLinux - # versions: - # - all - # - any - #- name: Debian - # versions: - # - all - # - etch - # - lenny - # - squeeze - # - wheezy - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - #- cloud - #- cloud:ec2 - #- cloud:gce - #- cloud:rax - #- clustering - #- database - #- database:nosql - #- database:sql - #- development - #- monitoring - #- networking - #- packaging - #- system - #- web -dependencies: [] - # List your role dependencies here, one per line. Only - # dependencies available via galaxy should be listed here. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml deleted file mode 100644 index 065f71f74..000000000 --- a/roles/os_env_extras_node/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755 - -# From the origin rpm there exists instructions on how to -# setup origin properly. 
The following steps come from there -- name: Change root to be in the Docker group - user: name=root groups=dockerroot append=yes diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 87e77c083..72a3401cf 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,6 +41,14 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 +# Workaround for Docker 1.4 to create DOCKER chain +- name: Add DOCKER chain + os_firewall_manage_iptables: + name: "DOCKER chain" + action: verify_chain + create_jump_rule: no +# End of Docker 1.4 workaround + - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3 From 4dc8ca74f47bcbe0fd6285b0d73cc5b193be17a9 Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Tue, 24 Mar 2015 12:40:21 -0700 Subject: * Remove DOCKER chain work around --- roles/os_firewall/tasks/firewall/iptables.yml | 8 -------- 1 file changed, 8 deletions(-) (limited to 'roles') diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 72a3401cf..87e77c083 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,14 +41,6 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 -# Workaround for Docker 1.4 to create DOCKER chain -- name: Add DOCKER chain - os_firewall_manage_iptables: - name: "DOCKER chain" - action: verify_chain - create_jump_rule: no -# End of Docker 1.4 workaround - - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3 From 4712e72c912a1102bff0508c98bd97da3f33ae95 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 23:53:17 -0400 Subject: openshift_facts role/module refactor default settings - Add openshift_facts role and module - Created new role openshift_facts that contains an openshift_facts module - Refactor openshift_* roles to use openshift_facts instead of relying on defaults - Refactor playbooks to use openshift_facts - Cleanup inventory group_vars - Update defaults - update openshift_master role firewall defaults - remove etcd peer port, since we will not be supporting clustered embedded etcd - remove 8444 since console now runs on the api port by default - add 8444 and 7001 to disabled services to ensure removal if updating - Add new role os_env_extras_node that is a subset of the docker role - previously, we were starting/enabling docker which was causing issues with some installations - Does not install or start docker, since the openshift-node role will handle that for us - Only adds root to the dockerroot group - Update playbooks to use ops_env_extras_node role instead of docker role - os_firewall bug fixes - ignore ip6tables for now, since we are not configuring any ipv6 rules - if installing package do a daemon-reload before starting/enabling service - Add aws support to bin/cluster - Add list action to bin/cluster - Add update action to bin/cluster - cleanup some stray debug statements - some variable renaming for clarity --- roles/openshift_common/README.md | 17 +- roles/openshift_common/defaults/main.yml | 1 + roles/openshift_common/meta/main.yml | 1 + roles/openshift_common/tasks/main.yml | 29 +- roles/openshift_common/tasks/set_facts.yml | 9 - roles/openshift_common/vars/main.yml | 5 +- roles/openshift_facts/README.md | 34 ++ roles/openshift_facts/library/openshift_facts.py | 482 
+++++++++++++++++++++ roles/openshift_facts/meta/main.yml | 15 + roles/openshift_facts/tasks/main.yml | 3 + roles/openshift_master/README.md | 28 +- roles/openshift_master/defaults/main.yml | 13 +- roles/openshift_master/handlers/main.yml | 1 - roles/openshift_master/tasks/main.yml | 50 ++- roles/openshift_master/vars/main.yml | 2 - roles/openshift_node/README.md | 3 - roles/openshift_node/defaults/main.yml | 2 - roles/openshift_node/handlers/main.yml | 2 +- roles/openshift_node/tasks/main.yml | 27 +- roles/openshift_node/vars/main.yml | 2 - roles/openshift_register_nodes/README.md | 22 +- .../library/kubernetes_register_node.py | 3 +- roles/openshift_register_nodes/meta/main.yml | 141 +----- roles/openshift_register_nodes/tasks/main.yml | 58 ++- roles/openshift_repos/defaults/main.yaml | 2 + roles/openshift_repos/meta/main.yml | 3 +- roles/openshift_repos/tasks/main.yaml | 6 + roles/openshift_sdn_master/defaults/main.yml | 2 - roles/openshift_sdn_master/meta/main.yml | 3 +- roles/openshift_sdn_master/tasks/main.yml | 18 +- roles/openshift_sdn_node/README.md | 6 - roles/openshift_sdn_node/defaults/main.yml | 2 - roles/openshift_sdn_node/meta/main.yml | 3 +- roles/openshift_sdn_node/tasks/main.yml | 23 +- roles/os_env_extras_node/tasks/main.yml | 5 + .../library/os_firewall_manage_iptables.py | 1 + roles/os_firewall/meta/main.yml | 1 + roles/os_firewall/tasks/firewall/firewalld.yml | 5 + roles/os_firewall/tasks/firewall/iptables.yml | 12 +- 39 files changed, 729 insertions(+), 313 deletions(-) delete mode 100644 roles/openshift_common/tasks/set_facts.yml create mode 100644 roles/openshift_facts/README.md create mode 100755 roles/openshift_facts/library/openshift_facts.py create mode 100644 roles/openshift_facts/meta/main.yml create mode 100644 roles/openshift_facts/tasks/main.yml delete mode 100644 roles/openshift_master/vars/main.yml delete mode 100644 roles/openshift_node/vars/main.yml mode change 100644 => 100755 roles/openshift_register_nodes/library/kubernetes_register_node.py delete mode 100644 roles/openshift_sdn_master/defaults/main.yml delete mode 100644 roles/openshift_sdn_node/defaults/main.yml create mode 100644 roles/os_env_extras_node/tasks/main.yml mode change 100644 => 100755 roles/os_firewall/library/os_firewall_manage_iptables.py (limited to 'roles') diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index 880d66e2c..14c2037e4 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -12,17 +12,20 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos. 
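The refactor above moves per-host settings into the namespaced openshift fact; a minimal sketch of feeding those settings in through inventory host variables, assuming the openshift_hostname/openshift_ip family of variables listed in the tables below (all values are placeholders):

    # example inventory host_vars consumed by openshift_common (placeholder values)
    openshift_cluster_id: prod
    openshift_hostname: node1.internal.example.com
    openshift_public_hostname: node1.example.com
    openshift_ip: 10.0.0.11
    openshift_public_ip: 203.0.113.11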
Role Variables -------------- -| Name | Default value | | -|-------------------------------|------------------------------|----------------------------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname | UNDEF (Required) | hostname to use for this instance | -| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_env | default | Envrionment name if multiple OpenShift instances | +| Name | Default value | | +|---------------------------|-------------------|---------------------------------------------| +| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters | +| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) | +| openshift_ip | UNDEF | Internal IP address to use for this host | +| openshift_public_hostname | UNDEF | Public hostname to use for this host | +| openshift_public_ip | UNDEF | Public IP address to use for this host | Dependencies ------------ os_firewall +openshift_facts openshift_repos Example Playbook @@ -38,4 +41,4 @@ Apache License, Version 2.0 Author Information ------------------ -TODO +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index 22b2c6ffd..4d3e0fe9e 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -1,2 +1,3 @@ --- +openshift_cluster_id: 'default' openshift_debug_level: 0 diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml index cee4dd337..81363ec68 100644 --- a/roles/openshift_common/meta/main.yml +++ b/roles/openshift_common/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info: - cloud dependencies: - { role: os_firewall } +- { role: openshift_facts } - { role: openshift_repos } diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 07737a71f..941190534 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,19 +1,16 @@ --- -- name: Set hostname - hostname: name={{ openshift_hostname }} +- name: Set common OpenShift facts + openshift_facts: + role: 'common' + local_facts: + cluster_id: "{{ openshift_cluster_id | default('default') }}" + debug_level: "{{ openshift_debug_level | default(0) }}" + hostname: "{{ openshift_hostname | default(None) }}" + ip: "{{ openshift_ip | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + public_ip: "{{ openshift_public_ip | default(None) }}" + use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" -- name: Configure local facts file - file: path=/etc/ansible/facts.d/ state=directory mode=0750 +- name: Set hostname + hostname: name={{ openshift.common.hostname }} -- name: Set common OpenShift facts - include: set_facts.yml - facts: - - section: common - option: env - value: "{{ openshift_env | default('default') }}" - - section: common - option: host_type - value: "{{ openshift_host_type }}" - - section: common - option: debug_level - value: "{{ openshift_debug_level }}" diff --git a/roles/openshift_common/tasks/set_facts.yml b/roles/openshift_common/tasks/set_facts.yml deleted file mode 100644 index 349eecd1d..000000000 --- a/roles/openshift_common/tasks/set_facts.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: "Setting local_facts" - ini_file: - dest: 
/etc/ansible/facts.d/openshift.fact - mode: 0640 - section: "{{ item.section }}" - option: "{{ item.option }}" - value: "{{ item.value }}" - with_items: facts diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml index 623aed9bf..50816d319 100644 --- a/roles/openshift_common/vars/main.yml +++ b/roles/openshift_common/vars/main.yml @@ -1,6 +1,7 @@ --- -openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/ - # TODO: Upstream kubernetes only supports iptables currently, if this changes, # then these variable should be moved to defaults +# TODO: it might be possible to still use firewalld if we wire up the created +# chains with the public zone (or the zone associated with the correct +# interfaces) os_firewall_use_firewalld: False diff --git a/roles/openshift_facts/README.md b/roles/openshift_facts/README.md new file mode 100644 index 000000000..2fd50e236 --- /dev/null +++ b/roles/openshift_facts/README.md @@ -0,0 +1,34 @@ +OpenShift Facts +=============== + +Provides the openshift_facts module + +Requirements +------------ + +None + +Role Variables +-------------- + +None + +Dependencies +------------ + +None + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py new file mode 100755 index 000000000..0dd343443 --- /dev/null +++ b/roles/openshift_facts/library/openshift_facts.py @@ -0,0 +1,482 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +DOCUMENTATION = ''' +--- +module: openshift_facts +short_description: OpenShift Facts +author: Jason DeTiberus +requirements: [ ] +''' +EXAMPLES = ''' +''' + +import ConfigParser +import copy + +class OpenShiftFactsUnsupportedRoleError(Exception): + pass + +class OpenShiftFactsFileWriteError(Exception): + pass + +class OpenShiftFacts(): + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn'] + + def __init__(self, role, filename, local_facts): + self.changed = False + self.filename = filename + if role not in self.known_roles: + raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role) + self.role = role + self.facts = self.generate_facts(local_facts) + + def generate_facts(self, local_facts): + local_facts = self.init_local_facts(local_facts) + roles = local_facts.keys() + + defaults = self.get_defaults(roles) + provider_facts = self.init_provider_facts() + facts = self.apply_provider_facts(defaults, provider_facts, roles) + + facts = self.merge_facts(facts, local_facts) + facts['current_config'] = self.current_config(facts) + self.set_url_facts_if_unset(facts) + return dict(openshift=facts) + + + def set_url_facts_if_unset(self, facts): + if 'master' in facts: + for (url_var, use_ssl, port, default) in [ + ('api_url', + facts['master']['api_use_ssl'], + facts['master']['api_port'], + facts['common']['hostname']), + ('public_api_url', + facts['master']['api_use_ssl'], + facts['master']['api_port'], + facts['common']['public_hostname']), + ('console_url', + facts['master']['console_use_ssl'], + facts['master']['console_port'], + facts['common']['hostname']), + ('public_console_url' 'console_use_ssl', + facts['master']['console_use_ssl'], + facts['master']['console_port'], + facts['common']['public_hostname'])]: + if url_var not in facts['master']: + scheme = 
'https' if use_ssl else 'http' + netloc = default + if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'): + netloc = "%s:%s" % (netloc, port) + facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', '')) + + + # Query current OpenShift config and return a dictionary containing + # settings that may be valuable for determining actions that need to be + # taken in the playbooks/roles + def current_config(self, facts): + current_config=dict() + roles = [ role for role in facts if role not in ['common','provider'] ] + for role in roles: + if 'roles' in current_config: + current_config['roles'].append(role) + else: + current_config['roles'] = [role] + + # TODO: parse the /etc/sysconfig/openshift-{master,node} config to + # determine the location of files. + + # Query kubeconfig settings + kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' + if role == 'node': + kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname']) + + kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') + if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path): + try: + _, output, error = module.run_command(["/usr/bin/openshift", "ex", + "config", "view", "-o", + "json", + "--kubeconfig=%s" % kubeconfig_path], + check_rc=False) + config = json.loads(output) + + try: + for cluster in config['clusters']: + config['clusters'][cluster]['certificate-authority-data'] = 'masked' + except KeyError: + pass + try: + for user in config['users']: + config['users'][user]['client-certificate-data'] = 'masked' + config['users'][user]['client-key-data'] = 'masked' + except KeyError: + pass + + current_config['kubeconfig'] = config + except Exception: + pass + + return current_config + + + def apply_provider_facts(self, facts, provider_facts, roles): + if not provider_facts: + return facts + + use_openshift_sdn = provider_facts.get('use_openshift_sdn') + if isinstance(use_openshift_sdn, bool): + facts['common']['use_openshift_sdn'] = use_openshift_sdn + + common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] + for h_var, ip_var in common_vars: + ip_value = provider_facts['network'].get(ip_var) + if ip_value: + facts['common'][ip_var] = ip_value + + facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var]) + + if 'node' in roles: + ext_id = provider_facts.get('external_id') + if ext_id: + facts['node']['external_id'] = ext_id + + facts['provider'] = provider_facts + return facts + + def hostname_valid(self, hostname): + if (not hostname or + hostname.startswith('localhost') or + hostname.endswith('localdomain') or + len(hostname.split('.')) < 2): + return False + + return True + + def choose_hostname(self, hostnames=[], fallback=''): + hostname = fallback + + ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ] + hosts = [ i for i in hostnames if i is not None and i not in set(ips) ] + + for host_list in (hosts, ips): + for h in host_list: + if self.hostname_valid(h): + return h + + return hostname + + def get_defaults(self, roles): + hardware_facts = self.get_hardware_facts() + net_facts = self.get_net_facts() + base_facts = self.get_base_facts() + + defaults = dict() + + common = dict(use_openshift_sdn=True) + ip = net_facts['default_ipv4']['address'] + common['ip'] = ip + common['public_ip'] = ip + + rc, output, error = module.run_command(['hostname', '-f']) + hostname_f = output.strip() if rc == 0 
else '' + hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']] + hostname = self.choose_hostname(hostname_values) + + common['hostname'] = hostname + common['public_hostname'] = hostname + defaults['common'] = common + + if 'master' in roles: + # TODO: provide for a better way to override just the port, or just + # the urls, instead of forcing both, also to override the hostname + # without having to re-generate these urls later + master = dict(api_use_ssl=True, api_port='8443', + console_use_ssl=True, console_path='/console', + console_port='8443', etcd_use_ssl=False, + etcd_port='4001') + defaults['master'] = master + + if 'node' in roles: + node = dict(external_id=common['hostname'], pod_cidr='', + labels={}, annotations={}) + node['resources_cpu'] = hardware_facts['processor_cores'] + node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75) + defaults['node'] = node + + return defaults + + def merge_facts(self, orig, new): + facts = dict() + for key, value in orig.iteritems(): + if key in new: + if isinstance(value, dict): + facts[key] = self.merge_facts(value, new[key]) + else: + facts[key] = copy.copy(new[key]) + else: + facts[key] = copy.deepcopy(value) + new_keys = set(new.keys()) - set(orig.keys()) + for key in new_keys: + facts[key] = copy.deepcopy(new[key]) + return facts + + def query_metadata(self, metadata_url, headers=None, expect_json=False): + r, info = fetch_url(module, metadata_url, headers=headers) + if info['status'] != 200: + module.fail_json(msg='Failed to query metadata', result=r, + info=info) + if expect_json: + return module.from_json(r.read()) + else: + return [line.strip() for line in r.readlines()] + + def walk_metadata(self, metadata_url, headers=None, expect_json=False): + metadata = dict() + + for line in self.query_metadata(metadata_url, headers, expect_json): + if line.endswith('/') and not line == 'public-keys/': + key = line[:-1] + metadata[key]=self.walk_metadata(metadata_url + line, headers, + expect_json) + else: + results = self.query_metadata(metadata_url + line, headers, + expect_json) + if len(results) == 1: + metadata[line] = results.pop() + else: + metadata[line] = results + return metadata + + def get_provider_metadata(self, metadata_url, supports_recursive=False, + headers=None, expect_json=False): + if supports_recursive: + metadata = self.query_metadata(metadata_url, headers, expect_json) + else: + metadata = self.walk_metadata(metadata_url, headers, expect_json) + return metadata + + def get_hardware_facts(self): + if not hasattr(self, 'hardware_facts'): + self.hardware_facts = Hardware().populate() + return self.hardware_facts + + def get_base_facts(self): + if not hasattr(self, 'base_facts'): + self.base_facts = Facts().populate() + return self.base_facts + + def get_virt_facts(self): + if not hasattr(self, 'virt_facts'): + self.virt_facts = Virtual().populate() + return self.virt_facts + + def get_net_facts(self): + if not hasattr(self, 'net_facts'): + self.net_facts = Network(module).populate() + return self.net_facts + + def guess_host_provider(self): + # TODO: cloud provider facts should probably be submitted upstream + virt_facts = self.get_virt_facts() + hardware_facts = self.get_hardware_facts() + product_name = hardware_facts['product_name'] + product_version = hardware_facts['product_version'] + virt_type = virt_facts['virtualization_type'] + virt_role = virt_facts['virtualization_role'] + provider = None + metadata = None + + # TODO: this is not exposed through 
module_utils/facts.py in ansible, + # need to create PR for ansible to expose it + bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor') + if bios_vendor == 'Google': + provider = 'gce' + metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true' + headers = {'Metadata-Flavor': 'Google'} + metadata = self.get_provider_metadata(metadata_url, True, headers, + True) + + # Filter sshKeys and serviceAccounts from gce metadata + metadata['project']['attributes'].pop('sshKeys', None) + metadata['instance'].pop('serviceAccounts', None) + elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): + provider = 'ec2' + metadata_url = 'http://169.254.169.254/latest/meta-data/' + metadata = self.get_provider_metadata(metadata_url) + elif re.search(r'OpenStack', product_name): + provider = 'openstack' + metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json' + metadata = self.get_provider_metadata(metadata_url, True, None, True) + ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' + metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) + + # Filter public_keys and random_seed from openstack metadata + metadata.pop('public_keys', None) + metadata.pop('random_seed', None) + return dict(name=provider, metadata=metadata) + + def normalize_provider_facts(self, provider, metadata): + if provider is None or metadata is None: + return {} + + # TODO: test for ipv6_enabled where possible (gce, aws do not support) + # and configure ipv6 facts if available + + # TODO: add support for setting user_data if available + + facts = dict(name=provider, metadata=metadata) + network = dict(interfaces=[], ipv6_enabled=False) + if provider == 'gce': + for interface in metadata['instance']['networkInterfaces']: + int_info = dict(ips=[interface['ip']], network_type=provider) + int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ] + int_info['public_ips'].extend(interface['forwardedIps']) + _, _, network_id = interface['network'].rpartition('/') + int_info['network_id'] = network_id + network['interfaces'].append(int_info) + _, _, zone = metadata['instance']['zone'].rpartition('/') + facts['zone'] = zone + facts['external_id'] = metadata['instance']['id'] + + # Default to no sdn for GCE deployments + facts['use_openshift_sdn'] = False + + # GCE currently only supports a single interface + network['ip'] = network['interfaces'][0]['ips'][0] + network['public_ip'] = network['interfaces'][0]['public_ips'][0] + network['hostname'] = metadata['instance']['hostname'] + + # TODO: attempt to resolve public_hostname + network['public_hostname'] = network['public_ip'] + elif provider == 'ec2': + for interface in sorted(metadata['network']['interfaces']['macs'].values(), + key=lambda x: x['device-number']): + int_info = dict() + var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} + for ips_var, int_var in var_map.iteritems(): + ips = interface[int_var] + int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips + int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic' + int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None + network['interfaces'].append(int_info) + facts['zone'] = metadata['placement']['availability-zone'] + facts['external_id'] = metadata['instance-id'] + + # TODO: actually attempt to determine default local and public ips + # by using the ansible default ip fact and the ipv4-associations + # form 
the ec2 metadata + network['ip'] = metadata['local-ipv4'] + network['public_ip'] = metadata['public-ipv4'] + + # TODO: verify that local hostname makes sense and is resolvable + network['hostname'] = metadata['local-hostname'] + + # TODO: verify that public hostname makes sense and is resolvable + network['public_hostname'] = metadata['public-hostname'] + elif provider == 'openstack': + # openstack ec2 compat api does not support network interfaces and + # the version tested on did not include the info in the openstack + # metadata api, should be updated if neutron exposes this. + + facts['zone'] = metadata['availability_zone'] + facts['external_id'] = metadata['uuid'] + network['ip'] = metadata['ec2_compat']['local-ipv4'] + network['public_ip'] = metadata['ec2_compat']['public-ipv4'] + + # TODO: verify local hostname makes sense and is resolvable + network['hostname'] = metadata['hostname'] + + # TODO: verify that public hostname makes sense and is resolvable + network['public_hostname'] = metadata['ec2_compat']['public-hostname'] + + facts['network'] = network + return facts + + def init_provider_facts(self): + provider_info = self.guess_host_provider() + provider_facts = self.normalize_provider_facts( + provider_info.get('name'), + provider_info.get('metadata') + ) + return provider_facts + + def get_facts(self): + # TODO: transform facts into cleaner format (openshift_ instead + # of openshift. + return self.facts + + def init_local_facts(self, facts={}): + changed = False + + local_facts = ConfigParser.SafeConfigParser() + local_facts.read(self.filename) + + section = self.role + if not local_facts.has_section(section): + local_facts.add_section(section) + changed = True + + for key, value in facts.iteritems(): + if isinstance(value, bool): + value = str(value) + if not value: + continue + if not local_facts.has_option(section, key) or local_facts.get(section, key) != value: + local_facts.set(section, key, value) + changed = True + + if changed and not module.check_mode: + try: + fact_dir = os.path.dirname(self.filename) + if not os.path.exists(fact_dir): + os.makedirs(fact_dir) + with open(self.filename, 'w') as fact_file: + local_facts.write(fact_file) + except (IOError, OSError) as e: + raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e)) + self.changed = changed + + role_facts = dict() + for section in local_facts.sections(): + role_facts[section] = dict() + for opt, val in local_facts.items(section): + role_facts[section][opt] = val + return role_facts + + +def main(): + global module + module = AnsibleModule( + argument_spec = dict( + role=dict(default='common', + choices=OpenShiftFacts.known_roles, + required=False), + local_facts=dict(default={}, type='dict', required=False), + ), + supports_check_mode=True, + add_file_common_args=True, + ) + + role = module.params['role'] + local_facts = module.params['local_facts'] + fact_file = '/etc/ansible/facts.d/openshift.fact' + + openshift_facts = OpenShiftFacts(role, fact_file, local_facts) + + file_params = module.params.copy() + file_params['path'] = fact_file + file_args = module.load_file_common_arguments(file_params) + changed = module.set_fs_attributes_if_different(file_args, + openshift_facts.changed) + + return module.exit_json(changed=changed, + ansible_facts=openshift_facts.get_facts()) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.facts import * +from ansible.module_utils.urls import * +main() diff --git 
a/roles/openshift_facts/meta/main.yml b/roles/openshift_facts/meta/main.yml new file mode 100644 index 000000000..0be3afd24 --- /dev/null +++ b/roles/openshift_facts/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: [] diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml new file mode 100644 index 000000000..5a7d10d25 --- /dev/null +++ b/roles/openshift_facts/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Gather OpenShift facts + openshift_facts: diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 2d898bc3b..9f9d0a613 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -13,20 +13,24 @@ Role Variables -------------- From this role: -| Name | Default value | -| -|------------------------------------------|-----------------------|----------------------------------------| -| openshift_master_manage_service_externally | False | Should the openshift-master role manage the openshift-master service? | -| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | -| openshift_node_ips | [] | List of the openshift node ip addresses, that we want to pre-register to the system when openshift-master starts up | -| openshift_registry_url | UNDEF (Optional) | Default docker registry to use | +| Name | Default value | | +|-------------------------------------|-----------------------|--------------------------------------------------| +| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | +| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up | +| openshift_registry_url | UNDEF | Default docker registry to use | +| openshift_master_api_port | UNDEF | | +| openshift_master_console_port | UNDEF | | +| openshift_master_api_url | UNDEF | | +| openshift_master_console_url | UNDEF | | +| openshift_master_public_api_url | UNDEF | | +| openshift_master_public_console_url | UNDEF | | From openshift_common: -| Name | Default Value | | -|-------------------------------|---------------------|---------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | UNDEF (Required) | hostname to use for this instance | +| Name | Default Value | | +|-------------------------------|----------------|----------------------------------------| +| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_public_ip | UNDEF | Public IP address to use for this host | +| openshift_hostname | UNDEF | hostname to use for this instance | Dependencies ------------ diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 0159afbb5..87fb347a8 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,16 +1,17 @@ --- -openshift_master_manage_service_externally: false -openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}" openshift_node_ips: [] + +# TODO: update setting these values based on the facts +# TODO: update for console port change os_firewall_allow: - service: etcd embedded port: 
4001/tcp -- service: etcd peer - port: 7001/tcp - service: OpenShift api https port: 8443/tcp -- service: OpenShift web console https - port: 8444/tcp os_firewall_deny: - service: OpenShift api http port: 8080/tcp +- service: former OpenShift web console port + port: 8444/tcp +- service: former etcd peer port + port: 7001/tcp diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 503d08d41..6fd4dfb51 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,4 +1,3 @@ --- - name: restart openshift-master service: name=openshift-master state=restarted - when: not openshift_master_manage_service_externally diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 52f5f694c..aa615df39 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -1,19 +1,37 @@ --- -# TODO: allow for overriding default ports where possible -# TODO: if setting up multiple masters, will need to predistribute the certs -# to the additional masters before starting openshift-master +# TODO: actually have api_port, api_use_ssl, console_port, console_use_ssl, +# etcd_use_ssl actually change the master config. + +- name: Set master OpenShift facts + openshift_facts: + role: 'master' + local_facts: + debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}" + api_port: "{{ openshift_master_api_port | default(None) }}" + api_url: "{{ openshift_master_api_url | default(None) }}" + api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" + public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + console_port: "{{ openshift_master_console_port | default(None) }}" + console_url: "{{ openshift_master_console_url | default(None) }}" + console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" + public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" - name: Install OpenShift Master package yum: pkg=openshift-master state=installed +# TODO: We should pre-generate the master config and point to the generated +# config rather than setting command line flags here - name: Configure OpenShift settings lineinfile: dest: /etc/sysconfig/openshift-master regexp: '^OPTIONS=' - line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\"" + line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\"" notify: - restart openshift-master +# TODO: should this be populated by a fact based on the deployment type +# (origin, online, enterprise)? 
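For reference, with the master defaults generated by openshift_facts (api_port and console_port of 8443, SSL enabled) and an illustrative hostname of master.example.com, set_url_facts_if_unset derives the URL facts roughly as below; only URLs that were not explicitly set are filled in this way:

    # illustrative values only; master.example.com is a placeholder hostname
    openshift:
      master:
        api_url: https://master.example.com:8443        # common.hostname + api_port
        public_api_url: https://master.example.com:8443 # common.public_hostname + api_port
        console_url: https://master.example.com:8443    # console_path is not appended by this helper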
- name: Set default registry url lineinfile: dest: /etc/sysconfig/openshift-master @@ -23,34 +41,18 @@ notify: - restart openshift-master -- name: Set master OpenShift facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: master - option: debug_level - value: "{{ openshift_master_debug_level }}" - - section: master - option: public_ip - value: "{{ openshift_public_ip }}" - - section: master - option: externally_managed - value: "{{ openshift_master_manage_service_externally }}" - - name: Start and enable openshift-master service: name=openshift-master enabled=yes state=started - when: not openshift_master_manage_service_externally - register: result - -- name: Disable openshift-master if openshift-master is managed externally - service: name=openshift-master enabled=false - when: openshift_master_manage_service_externally - name: Create .kube directory file: path: /root/.kube state: directory mode: 0700 + +# TODO: Update this file if the contents of the source file are not present in +# the dest file, will need to make sure to ignore things that could be added - name: Configure root user kubeconfig - command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig + command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig args: creates: /root/.kube/.kubeconfig diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml deleted file mode 100644 index 9a8c4bba2..000000000 --- a/roles/openshift_master/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_host_type: master diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index c9b4eab34..83359f164 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -16,10 +16,7 @@ Role Variables From this role: | Name | Default value | | |------------------------------------------|-----------------------|----------------------------------------| -| openshift_node_manage_service_externally | False | Should the openshift-node role manage the openshift-node service? 
| | openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node | -| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts | -| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication | | openshift_registry_url | UNDEF (Optional) | Default docker registry to use | From openshift_common: diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 6dc73a96e..df7ec41b6 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,4 @@ --- -openshift_node_manage_service_externally: false -openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}" os_firewall_allow: - service: OpenShift kubelet port: 10250/tcp diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index f7aa36d88..ca2992637 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,4 +1,4 @@ --- - name: restart openshift-node service: name=openshift-node state=restarted - when: not openshift_node_manage_service_externally + when: not openshift.common.use_openshift_sdn|bool diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index c039e3f05..8cfef0e15 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,4 +1,12 @@ --- +# TODO: allow for overriding default ports where possible +# TODO: trigger the external service when restart is needed +- name: Set node OpenShift facts + openshift_facts: + role: 'node' + local_facts: + debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" + - name: Test if node certs and config exist stat: path={{ item }} failed_when: not result.stat.exists @@ -23,7 +31,7 @@ lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\"" + line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\"" notify: - restart openshift-node @@ -36,23 +44,10 @@ notify: - restart openshift-node -- name: Set OpenShift node facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: node - option: debug_level - value: "{{ openshift_node_debug_level }}" - - section: node - option: public_ip - value: "{{ openshift_public_ip }}" - - section: node - option: externally_managed - value: "{{ openshift_node_manage_service_externally }}" - - name: Start and enable openshift-node service: name=openshift-node enabled=yes state=started - when: not openshift_node_manage_service_externally + when: not openshift.common.use_openshift_sdn|bool - name: Disable openshift-node if openshift-node is managed externally service: name=openshift-node enabled=false - when: openshift_node_manage_service_externally + when: openshift.common.use_openshift_sdn|bool diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml deleted file mode 100644 index 9841d52f9..000000000 --- a/roles/openshift_node/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_host_type: node diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md index 225dd44b9..b96faa044 100644 --- 
a/roles/openshift_register_nodes/README.md +++ b/roles/openshift_register_nodes/README.md @@ -1,38 +1,34 @@ -Role Name -========= +OpenShift Register Nodes +======================== -A brief description of the role goes here. +TODO Requirements ------------ -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. +TODO Role Variables -------------- -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. +TODO Dependencies ------------ -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. +TODO Example Playbook ---------------- -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } +TODO License ------- -BSD +Apache License Version 2.0 Author Information ------------------ -An optional section for the role authors to include contact information, or a website (HTML is not allowed). +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py old mode 100644 new mode 100755 index 409215616..8ebeb087a --- a/roles/openshift_register_nodes/library/kubernetes_register_node.py +++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py @@ -214,7 +214,8 @@ class Node: resources = NodeResources(version, cpu, memory), cidr = podCIDR, labels = labels, - annotations = annotations + annotations = annotations, + externalID = externalID ) elif version == 'v1beta3': metadata = dict(name = name, diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml index 7b1f0ef0a..e40a152c1 100644 --- a/roles/openshift_register_nodes/meta/main.yml +++ b/roles/openshift_register_nodes/meta/main.yml @@ -1,128 +1,17 @@ --- galaxy_info: - author: your name - description: - company: your company (optional) - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - min_ansible_version: 1.2 - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! 
- # - #platforms: - #- name: EL - # versions: - # - all - # - 5 - # - 6 - # - 7 - #- name: GenericUNIX - # versions: - # - all - # - any - #- name: Fedora - # versions: - # - all - # - 16 - # - 17 - # - 18 - # - 19 - # - 20 - #- name: SmartOS - # versions: - # - all - # - any - #- name: opensuse - # versions: - # - all - # - 12.1 - # - 12.2 - # - 12.3 - # - 13.1 - # - 13.2 - #- name: Amazon - # versions: - # - all - # - 2013.03 - # - 2013.09 - #- name: GenericBSD - # versions: - # - all - # - any - #- name: FreeBSD - # versions: - # - all - # - 8.0 - # - 8.1 - # - 8.2 - # - 8.3 - # - 8.4 - # - 9.0 - # - 9.1 - # - 9.1 - # - 9.2 - #- name: Ubuntu - # versions: - # - all - # - lucid - # - maverick - # - natty - # - oneiric - # - precise - # - quantal - # - raring - # - saucy - # - trusty - #- name: SLES - # versions: - # - all - # - 10SP3 - # - 10SP4 - # - 11 - # - 11SP1 - # - 11SP2 - # - 11SP3 - #- name: GenericLinux - # versions: - # - all - # - any - #- name: Debian - # versions: - # - all - # - etch - # - lenny - # - squeeze - # - wheezy - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - #- cloud - #- cloud:ec2 - #- cloud:gce - #- cloud:rax - #- clustering - #- database - #- database:nosql - #- database:sql - #- development - #- monitoring - #- networking - #- packaging - #- system - #- web -dependencies: [] - # List your role dependencies here, one per line. Only - # dependencies available via galaxy should be listed here. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } + diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml index 59216fc87..7319b88b1 100644 --- a/roles/openshift_register_nodes/tasks/main.yml +++ b/roles/openshift_register_nodes/tasks/main.yml @@ -1,18 +1,20 @@ --- -# TODO: support configuration for multiple masters, currently hardcoding -# the info from the first master +# TODO: support new create-config command to generate node certs and config +# TODO: recreate master/node configs if settings that affect the configs +# change (hostname, public_hostname, ip, public_ip, etc) # TODO: create a failed_when condition - name: Create node server certificates command: > /usr/bin/openshift admin create-server-cert --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key - --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }} + --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key + --hostnames={{ [item.openshift.common.hostname, + item.openshift.common.public_hostname]|unique|join(",") }} args: chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt" with_items: openshift_nodes register: server_cert_result @@ -21,48 +23,42 @@ command: > /usr/bin/openshift admin 
create-node-cert --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key - --node-name={{ item.openshift_node_hostname }} + --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key + --node-name={{ item.openshift.common.hostname }} args: chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt" with_items: openshift_nodes register: node_cert_result -# TODO: re-create kubeconfig if certs were regenerated, not just if -# .kubeconfig doesn't exist # TODO: create a failed_when condition - name: Create kubeconfigs for nodes command: > /usr/bin/openshift admin create-kubeconfig - --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt - --client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key - --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig - --master={{ openshift_master_urls[0] }} - --public-master={{ openshift_master_public_urls[0] }} + --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt + --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key + --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig + --master={{ openshift.master.api_url }} + --public-master={{ openshift.master.public_api_url }} args: chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig" with_items: openshift_nodes register: kubeconfig_result -# TODO: generate the node configs (openshift start node --write-config -# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml' -# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig' -# will need to modify the generated node config as needed -# (servingInfo.{certFile,clientCA,keyFile}) - - name: Register unregistered nodes kubernetes_register_node: - name: "{{ item.openshift_node_name }}" + client_user: openshift-client + name: "{{ item.openshift.common.hostname }}" api_version: "{{ openshift_kube_api_version }}" - cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}" - memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}" - pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}" - host_ip: "{{ item.openshift_node_host_ip }}" - labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}" - annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}" + cpu: "{{ item.openshift.node.resources_cpu | default(None) }}" + memory: "{{ item.openshift.node.resources_memory | default(None) }}" + pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}" + host_ip: "{{ item.openshift.common.ip }}" + labels: "{{ item.openshift.node.labels | default({}) }}" + annotations: "{{ item.openshift.node.annotations | default({}) }}" + external_id: "{{ item.openshift.node.external_id }}" # TODO: support 
customizing other attributes such as: client_config, # client_cluster, client_context, client_user # TODO: update for v1beta3 changes after rebase: hostnames, external_ips, diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml index 6fe2bf621..1730207f4 100644 --- a/roles/openshift_repos/defaults/main.yaml +++ b/roles/openshift_repos/defaults/main.yaml @@ -1,5 +1,7 @@ --- # TODO: once we are able to configure/deploy origin using the openshift roles, # then we should default to origin + +# TODO: push the defaulting of these values to the openshift_facts module openshift_deployment_type: online openshift_additional_repos: {} diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml index cc18c453c..0558b822c 100644 --- a/roles/openshift_repos/meta/main.yml +++ b/roles/openshift_repos/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 6219c4906..bb1551d37 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -1,6 +1,12 @@ --- # TODO: Add flag for enabling EPEL repo, default to false +# TODO: Add subscription-management config, with parameters +# for username, password, poolid(name), and official repos to +# enable/disable. Might need to make a module that extends the +# subscription management module to take a poolid and enable/disable the +# proper repos correctly. + - assert: that: openshift_deployment_type in known_openshift_deployment_types diff --git a/roles/openshift_sdn_master/defaults/main.yml b/roles/openshift_sdn_master/defaults/main.yml deleted file mode 100644 index da7655546..000000000 --- a/roles/openshift_sdn_master/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}" diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_sdn_master/meta/main.yml index e6e5514d1..5de32cc13 100644 --- a/roles/openshift_sdn_master/meta/main.yml +++ b/roles/openshift_sdn_master/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- { role: openshift_common } diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml index e1761afdc..f2d61043b 100644 --- a/roles/openshift_sdn_master/tasks/main.yml +++ b/roles/openshift_sdn_master/tasks/main.yml @@ -1,4 +1,13 @@ --- +# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been +# started yet + +- name: Set master sdn OpenShift facts + openshift_facts: + role: 'master_sdn' + local_facts: + debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}" + - name: Install openshift-sdn-master yum: pkg: openshift-sdn-master @@ -8,17 +17,10 @@ lineinfile: dest: /etc/sysconfig/openshift-sdn-master regexp: '^OPTIONS=' - line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\"" + line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\"" notify: - restart openshift-sdn-master -- name: Set openshift-sdn-master facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: sdn-master - option: debug_level - value: "{{ openshift_sdn_master_debug_level }}" - - name: Enable openshift-sdn-master service: name: openshift-sdn-master diff --git a/roles/openshift_sdn_node/README.md 
b/roles/openshift_sdn_node/README.md index 2da2d74eb..e6b6a9503 100644 --- a/roles/openshift_sdn_node/README.md +++ b/roles/openshift_sdn_node/README.md @@ -17,12 +17,6 @@ From this role: | openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | -From openshift_node: -| Name | Default value | | -|-----------------------|------------------|--------------------------------------| -| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication | - - From openshift_common: | Name | Default value | | |-------------------------------|---------------------|----------------------------------------| diff --git a/roles/openshift_sdn_node/defaults/main.yml b/roles/openshift_sdn_node/defaults/main.yml deleted file mode 100644 index 9612d9d91..000000000 --- a/roles/openshift_sdn_node/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}" diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_sdn_node/meta/main.yml index ab45ff51e..ffe10f836 100644 --- a/roles/openshift_sdn_node/meta/main.yml +++ b/roles/openshift_sdn_node/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- { role: openshift_common } diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml index ff05a6972..729c28879 100644 --- a/roles/openshift_sdn_node/tasks/main.yml +++ b/roles/openshift_sdn_node/tasks/main.yml @@ -1,4 +1,10 @@ --- +- name: Set node sdn OpenShift facts + openshift_facts: + role: 'node_sdn' + local_facts: + debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}" + - name: Install openshift-sdn-node yum: pkg: openshift-sdn-node @@ -14,28 +20,19 @@ backrefs: yes with_items: - regex: '^(OPTIONS=)' - line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"' + line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"' - regex: '^(MASTER_URL=)' - line: '\1"http://{{ openshift_master_ips | first }}:4001"' + line: '\1"{{ openshift_sdn_master_url }}"' - regex: '^(MINION_IP=)' - line: '\1"{{ openshift_public_ip }}"' + line: '\1"{{ openshift.common.ip }}"' # TODO lock down the insecure-registry config to a more sane value than # 0.0.0.0/0 - regex: '^(DOCKER_OPTIONS=)' line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"' notify: restart openshift-sdn-node -- name: Set openshift-sdn-node facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: sdn-node - option: debug_level - value: "{{ openshift_sdn_node_debug_level }}" - -# fixme: Once the openshift_cluster playbook is published state should be started -# Always bounce service to pick up new credentials - name: Start and enable openshift-sdn-node service: name: openshift-sdn-node enabled: yes - state: restarted + state: started diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml new file mode 100644 index 000000000..208065df2 --- /dev/null +++ b/roles/os_env_extras_node/tasks/main.yml @@ -0,0 +1,5 @@ +--- +# From the origin rpm there exists instructions on how to +# setup origin properly. 
The following steps come from there +- name: Change root to be in the Docker group + user: name=root groups=dockerroot append=yes diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py old mode 100644 new mode 100755 index 6a018d022..90588d2ae --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -1,5 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 from subprocess import call, check_output diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml index 7a8cef6c5..8592371e8 100644 --- a/roles/os_firewall/meta/main.yml +++ b/roles/os_firewall/meta/main.yml @@ -1,3 +1,4 @@ +--- galaxy_info: author: Jason DeTiberus description: os_firewall diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 469cfab6f..b6bddd5c5 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -3,6 +3,7 @@ yum: name: firewalld state: present + register: install_result - name: Check if iptables-services is installed command: rpm -q iptables-services @@ -20,6 +21,10 @@ - ip6tables when: pkg_check.rc == 0 +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed + - name: Start and enable firewalld service service: name: firewalld diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 87e77c083..7b5c00a9b 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -6,6 +6,7 @@ with_items: - iptables - iptables-services + register: install_result - name: Check if firewalld is installed command: rpm -q firewalld @@ -20,14 +21,15 @@ enabled: no when: pkg_check.rc == 0 -- name: Start and enable iptables services +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed + +- name: Start and enable iptables service service: - name: "{{ item }}" + name: iptables state: started enabled: yes - with_items: - - iptables - - ip6tables register: result - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail -- cgit v1.2.3 From 8a4888ad30ce7c5898caac47614da2e13a759320 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 1 Mar 2015 00:27:04 -0500 Subject: Add byo playbooks and enterprise docs - added byo playbooks - added byo (example) inventory - added a README_OSE.md for getting started with Enterprise deployments - Added an ansible.cfg as an example for configuration helpful for playbooks/roles --- roles/openshift_node/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'roles') diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 8cfef0e15..e3c04585b 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,6 +1,7 @@ --- # TODO: allow for overriding default ports where possible # TODO: trigger the external service when restart is needed + - name: Set node OpenShift facts openshift_facts: role: 'node' -- cgit v1.2.3 From b167f7b3c4082a3d990aabeb10faac888e7172b3 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 7 Apr 2015 22:34:00 -0400 Subject: move zbxapi module to a new os_zabbix role - cleans up repo root a bit --- roles/os_zabbix/library/zbxapi.py | 273 
++++++++++++++++++++++++++++++++++++++ 1 file changed, 273 insertions(+) create mode 100755 roles/os_zabbix/library/zbxapi.py (limited to 'roles') diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py new file mode 100755 index 000000000..f4f52909b --- /dev/null +++ b/roles/os_zabbix/library/zbxapi.py @@ -0,0 +1,273 @@ +#!/usr/bin/env python + +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Purpose: An ansible module to communicate with zabbix. +# + +import json +import httplib2 +import sys +import os +import re + +class ZabbixAPI(object): + ''' + ZabbixAPI class + ''' + classes = { + 'Action': ['create', 'delete', 'get', 'update'], + 'Alert': ['get'], + 'Application': ['create', 'delete', 'get', 'massadd', 'update'], + 'Configuration': ['export', 'import'], + 'Dcheck': ['get'], + 'Dhost': ['get'], + 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Dservice': ['get'], + 'Event': ['acknowledge', 'get'], + 'Graph': ['create', 'delete', 'get', 'update'], + 'Graphitem': ['get'], + 'Graphprototype': ['create', 'delete', 'get', 'update'], + 'History': ['get'], + 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'], + 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'], + 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'], + 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Image': ['create', 'delete', 'get', 'update'], + 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Maintenance': ['create', 'delete', 'get', 'update'], + 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Mediatype': ['create', 'delete', 'get', 'update'], + 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Screen': ['create', 'delete', 'get', 'update'], + 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'], + 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'], + 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'], + 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'], + 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'], + 'Templatescreenitem': ['get'], + 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'], + 'Triggerprototype': ['create', 
'delete', 'get', 'update'], + 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'], + 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'], + 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'], + 'Usermedia': ['get'], + } + + def __init__(self, data={}): + self.server = data['server'] or None + self.username = data['user'] or None + self.password = data['password'] or None + if any(map(lambda value: value == None, [self.server, self.username, self.password])): + print 'Please specify zabbix server url, username, and password.' + sys.exit(1) + + self.verbose = data.has_key('verbose') + self.use_ssl = data.has_key('use_ssl') + self.auth = None + + for class_name, method_names in self.classes.items(): + #obj = getattr(self, class_name)(self) + #obj.__dict__ + setattr(self, class_name.lower(), getattr(self, class_name)(self)) + + results = self.user.login(user=self.username, password=self.password) + + if results[0]['status'] == '200': + if results[1].has_key('result'): + self.auth = results[1]['result'] + elif results[1].has_key('error'): + print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error']) + sys.exit(1) + else: + print "Error in call to zabbix. Http status: {0}.".format(results[0]['status']) + sys.exit(1) + + def perform(self, method, params): + ''' + This method calls your zabbix server. + + It requires the following parameters in order for a proper request to be processed: + + jsonrpc - the version of the JSON-RPC protocol used by the API; the Zabbix API implements JSON-RPC version 2.0; + method - the API method being called; + params - parameters that will be passed to the API method; + id - an arbitrary identifier of the request; + auth - a user authentication token; since we don't have one yet, it's set to null. + ''' + http_method = "POST" + if params.has_key("http_method"): + http_method = params['http_method'] + + jsonrpc = "2.0" + if params.has_key('jsonrpc'): + jsonrpc = params['jsonrpc'] + + rid = 1 + if params.has_key('id'): + rid = params['id'] + + http = None + if self.use_ssl: + http = httplib2.Http() + else: + http = httplib2.Http( disable_ssl_certificate_validation=True,) + + headers = params.get('headers', {}) + headers["Content-type"] = "application/json" + + body = { + "jsonrpc": jsonrpc, + "method": method, + "params": params, + "id": rid, + 'auth': self.auth, + } + + if method in ['user.login','api.version']: + del body['auth'] + + body = json.dumps(body) + + if self.verbose: + print body + print method + print headers + httplib2.debuglevel = 1 + + response, results = http.request(self.server, http_method, body, headers) + + if self.verbose: + print response + print results + + try: + results = json.loads(results) + except ValueError as e: + results = {"error": e.message} + + return response, results + + ''' + This bit of metaprogramming is where the ZabbixAPI subclasses are created. + For each of ZabbixAPI.classes we create a class from the key and methods + from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class + to each subclass in order for each to be able to call the perform method. + ''' + @staticmethod + def meta(class_name, method_names): + # This meta method allows a class to add methods to it. + def meta_method(Class, method_name): + # This template method is a stub method for each of the subclass + # methods. 
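# A minimal standalone sketch of the pattern described above (illustrative
# only, not part of this module): one stub method is generated per API method
# name and attached to a dynamically created class, so that a call such as
# api.user.login(user='u', password='p') ends up invoking
# perform('user.login', {'user': 'u', 'password': 'p'}).
def make_api_class(class_name, method_names, perform):
    def add_method(cls, method_name):
        def stub(self, **params):
            return perform(class_name.lower() + '.' + method_name, params)
        stub.__name__ = method_name
        setattr(cls, method_name, stub)
    cls = type(class_name, (object,), {})
    for method_name in method_names:
        add_method(cls, method_name)
    return cls

calls = []
User = make_api_class('User', ['login', 'get'],
                      lambda method, params: calls.append((method, params)))
User().login(user='admin', password='secret')
# calls is now [('user.login', {'user': 'admin', 'password': 'secret'})]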
+ def template_method(self, **params): + return self.parent.perform(class_name.lower()+"."+method_name, params) + template_method.__doc__ = "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % (class_name.lower(), method_name) + template_method.__name__ = method_name + # this is where the template method is placed inside of the subclass + # e.g. setattr(User, "create", stub_method) + setattr(Class, template_method.__name__, template_method) + + # This class call instantiates a subclass. e.g. User + Class=type(class_name, (object,), { '__doc__': "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % class_name.lower() }) + # This init method gets placed inside of the Class + # to allow it to be instantiated. A reference to the parent class(ZabbixAPI) + # is passed in to allow each class access to the perform method. + def __init__(self, parent): + self.parent = parent + # This attaches the init to the subclass. e.g. Create + setattr(Class, __init__.__name__, __init__) + # For each of our ZabbixAPI.classes dict values + # Create a method and attach it to our subclass. + # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile', + # 'update', 'iswritable', 'logout', 'addmedia', 'create', + # 'login', 'deletemedia', 'isreadable'], + # User.delete + # User.get + for method_name in method_names: + meta_method(Class, method_name) + # Return our subclass with all methods attached + return Class + +# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming +for class_name, method_names in ZabbixAPI.classes.items(): + setattr(ZabbixAPI, class_name, ZabbixAPI.meta(class_name, method_names)) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + user=dict(default=None, type='str'), + password=dict(default=None, type='str'), + zbx_class=dict(choices=ZabbixAPI.classes.keys()), + action=dict(default=None, type='str'), + params=dict(), + debug=dict(default=False, type='bool'), + ), + #supports_check_mode=True + ) + + user = module.params.get('user', None) + if not user: + user = os.environ['ZABBIX_USER'] + + pw = module.params.get('password', None) + if not pw: + pw = os.environ['ZABBIX_PASSWORD'] + + server = module.params['server'] + + if module.params['debug']: + options['debug'] = True + + api_data = { + 'user': user, + 'password': pw, + 'server': server, + } + + if not user or not pw or not server: + module.fail_json('Please specify the user, password, and the zabbix server.') + + zapi = ZabbixAPI(api_data) + + zbx_class = module.params.get('zbx_class') + action = module.params.get('action') + params = module.params.get('params', {}) + + + # Get the instance we are trying to call + zbx_class_inst = zapi.__getattribute__(zbx_class.lower()) + # Get the instance's method we are trying to call + zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__[action] + # Make the call with the incoming params + results = zbx_action_method(zbx_class_inst, **params) + + # Results Section + changed_state = False + status = results[0]['status'] + if status not in ['200', '201']: + #changed_state = False + module.fail_json(msg="Http response: [%s] - Error: %s" % (str(results[0]), results[1])) + + module.exit_json(**{'results': results[1]['result']}) + +from ansible.module_utils.basic import * + +main() -- cgit v1.2.3 From 3c521113b4b7a79d69c788600df67c460c887963 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 8 Apr 2015 16:53:28 -0400 Subject: 
Adding yum_repo role for facilitating repository deployment --- roles/yum_repo/README.md | 34 ++++++++++++++++++++++++++++++++++ roles/yum_repo/defaults/main.yml | 5 +++++ roles/yum_repo/handlers/main.yml | 2 ++ roles/yum_repo/meta/main.yml | 8 ++++++++ roles/yum_repo/tasks/main.yml | 8 ++++++++ roles/yum_repo/templates/yumrepo.j2 | 5 +++++ roles/yum_repo/vars/main.yml | 2 ++ 7 files changed, 64 insertions(+) create mode 100644 roles/yum_repo/README.md create mode 100644 roles/yum_repo/defaults/main.yml create mode 100644 roles/yum_repo/handlers/main.yml create mode 100644 roles/yum_repo/meta/main.yml create mode 100644 roles/yum_repo/tasks/main.yml create mode 100644 roles/yum_repo/templates/yumrepo.j2 create mode 100644 roles/yum_repo/vars/main.yml (limited to 'roles') diff --git a/roles/yum_repo/README.md b/roles/yum_repo/README.md new file mode 100644 index 000000000..7f6a615cb --- /dev/null +++ b/roles/yum_repo/README.md @@ -0,0 +1,34 @@ +Role Name +========= + +This role allows easy deployment of yum repository config files. + +Requirements +------------ + +Yum + +Role Variables +-------------- + +Dependencies +------------ + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +ASL 2.0 + +Author Information +------------------ + +openshift online operations diff --git a/roles/yum_repo/defaults/main.yml b/roles/yum_repo/defaults/main.yml new file mode 100644 index 000000000..95e78af69 --- /dev/null +++ b/roles/yum_repo/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for yum-repo +repo_enabled: "1" +repo_gpg_check: "1" + diff --git a/roles/yum_repo/handlers/main.yml b/roles/yum_repo/handlers/main.yml new file mode 100644 index 000000000..a48c89ac2 --- /dev/null +++ b/roles/yum_repo/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for yum-repo diff --git a/roles/yum_repo/meta/main.yml b/roles/yum_repo/meta/main.yml new file mode 100644 index 000000000..e0b53ce7f --- /dev/null +++ b/roles/yum_repo/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: openshift operations + description: + company: RedHat + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: [] diff --git a/roles/yum_repo/tasks/main.yml b/roles/yum_repo/tasks/main.yml new file mode 100644 index 000000000..a56d1f133 --- /dev/null +++ b/roles/yum_repo/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# tasks file for yum-repo + +- name: Installing yum-repo template + template: + src: yumrepo.j2 + dest: /etc/yum.repos.d/{{ repo_tag }}.repo + diff --git a/roles/yum_repo/templates/yumrepo.j2 b/roles/yum_repo/templates/yumrepo.j2 new file mode 100644 index 000000000..b06a6f41a --- /dev/null +++ b/roles/yum_repo/templates/yumrepo.j2 @@ -0,0 +1,5 @@ +[{{ repo_tag }}] +name={{ repo_name }} +baseurl={{ repo_baseurl }} +enabled={{ repo_enabled }} +gpg_check={{ repo_gpg_check }} diff --git a/roles/yum_repo/vars/main.yml b/roles/yum_repo/vars/main.yml new file mode 100644 index 000000000..48182ac8e --- /dev/null +++ b/roles/yum_repo/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for yum-repo -- cgit v1.2.3 From 9f59e1bad63fa3841c9f2a50d4b46dbd35601788 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Thu, 9 Apr 2015 09:56:41 -0400 Subject: added more options to the yum repo --- roles/yum_repo/templates/yumrepo.j2 | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'roles') diff --git 
a/roles/yum_repo/templates/yumrepo.j2 b/roles/yum_repo/templates/yumrepo.j2 index b06a6f41a..af879be31 100644 --- a/roles/yum_repo/templates/yumrepo.j2 +++ b/roles/yum_repo/templates/yumrepo.j2 @@ -3,3 +3,7 @@ name={{ repo_name }} baseurl={{ repo_baseurl }} enabled={{ repo_enabled }} gpg_check={{ repo_gpg_check }} +sslverify={{ repo_sslverify }} +sslclientcert={{ repo_client_cert }} +sslclientkey={{ repo_client_key }} +gpgkey={{ repo_gpgkey }} -- cgit v1.2.3 From f28ff57f98140a1a22423df34d6457ee669fe714 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 9 Apr 2015 12:58:43 -0400 Subject: refactor yum_repo role to handle multiple repos/files - Rename yum_repo role to yum_repos - Update yum_repos to take a more complex datastructure to describe multiple repo files and multiple repos within those files - Update the template to support multiple repos within the repo file - Update the template to allow for any key, value pair passed in instead of a hard coded list. - Add assertions to verify the repo_files variable is properly defined - Convert the legacy variables to the new repo_files variable --- roles/yum_repo/README.md | 34 ----------- roles/yum_repo/defaults/main.yml | 5 -- roles/yum_repo/handlers/main.yml | 2 - roles/yum_repo/meta/main.yml | 8 --- roles/yum_repo/tasks/main.yml | 8 --- roles/yum_repo/templates/yumrepo.j2 | 9 --- roles/yum_repo/vars/main.yml | 2 - roles/yum_repos/README.md | 113 +++++++++++++++++++++++++++++++++++ roles/yum_repos/defaults/main.yml | 3 + roles/yum_repos/meta/main.yml | 8 +++ roles/yum_repos/tasks/main.yml | 47 +++++++++++++++ roles/yum_repos/templates/yumrepo.j2 | 18 ++++++ 12 files changed, 189 insertions(+), 68 deletions(-) delete mode 100644 roles/yum_repo/README.md delete mode 100644 roles/yum_repo/defaults/main.yml delete mode 100644 roles/yum_repo/handlers/main.yml delete mode 100644 roles/yum_repo/meta/main.yml delete mode 100644 roles/yum_repo/tasks/main.yml delete mode 100644 roles/yum_repo/templates/yumrepo.j2 delete mode 100644 roles/yum_repo/vars/main.yml create mode 100644 roles/yum_repos/README.md create mode 100644 roles/yum_repos/defaults/main.yml create mode 100644 roles/yum_repos/meta/main.yml create mode 100644 roles/yum_repos/tasks/main.yml create mode 100644 roles/yum_repos/templates/yumrepo.j2 (limited to 'roles') diff --git a/roles/yum_repo/README.md b/roles/yum_repo/README.md deleted file mode 100644 index 7f6a615cb..000000000 --- a/roles/yum_repo/README.md +++ /dev/null @@ -1,34 +0,0 @@ -Role Name -========= - -This role allows easy deployment of yum repository config files. 
- -Requirements ------------- - -Yum - -Role Variables --------------- - -Dependencies ------------- - -Example Playbook ----------------- - -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } - -License -------- - -ASL 2.0 - -Author Information ------------------- - -openshift online operations diff --git a/roles/yum_repo/defaults/main.yml b/roles/yum_repo/defaults/main.yml deleted file mode 100644 index 95e78af69..000000000 --- a/roles/yum_repo/defaults/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# defaults file for yum-repo -repo_enabled: "1" -repo_gpg_check: "1" - diff --git a/roles/yum_repo/handlers/main.yml b/roles/yum_repo/handlers/main.yml deleted file mode 100644 index a48c89ac2..000000000 --- a/roles/yum_repo/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for yum-repo diff --git a/roles/yum_repo/meta/main.yml b/roles/yum_repo/meta/main.yml deleted file mode 100644 index e0b53ce7f..000000000 --- a/roles/yum_repo/meta/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -galaxy_info: - author: openshift operations - description: - company: RedHat - license: ASL 2.0 - min_ansible_version: 1.2 -dependencies: [] diff --git a/roles/yum_repo/tasks/main.yml b/roles/yum_repo/tasks/main.yml deleted file mode 100644 index a56d1f133..000000000 --- a/roles/yum_repo/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# tasks file for yum-repo - -- name: Installing yum-repo template - template: - src: yumrepo.j2 - dest: /etc/yum.repos.d/{{ repo_tag }}.repo - diff --git a/roles/yum_repo/templates/yumrepo.j2 b/roles/yum_repo/templates/yumrepo.j2 deleted file mode 100644 index af879be31..000000000 --- a/roles/yum_repo/templates/yumrepo.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[{{ repo_tag }}] -name={{ repo_name }} -baseurl={{ repo_baseurl }} -enabled={{ repo_enabled }} -gpg_check={{ repo_gpg_check }} -sslverify={{ repo_sslverify }} -sslclientcert={{ repo_client_cert }} -sslclientkey={{ repo_client_key }} -gpgkey={{ repo_gpgkey }} diff --git a/roles/yum_repo/vars/main.yml b/roles/yum_repo/vars/main.yml deleted file mode 100644 index 48182ac8e..000000000 --- a/roles/yum_repo/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for yum-repo diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md new file mode 100644 index 000000000..51ecd5d34 --- /dev/null +++ b/roles/yum_repos/README.md @@ -0,0 +1,113 @@ +Yum Repos +========= + +This role allows easy deployment of yum repository config files. 
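As a rough illustration of what the role ends up writing (a sketch that mirrors the logic of the yumrepo.j2 template added later in this patch rather than rendering it exactly), a single repo entry produces a stanza in /etc/yum.repos.d/<file id>.repo along these lines:

    # Sketch of the template logic: the known keys are emitted explicitly and
    # any other key/value pair on the repo dict is passed straight through.
    KNOWN_KEYS = ('id', 'name', 'baseurl', 'enabled', 'gpgcheck')

    def render_repo(repo, repo_enabled=1, repo_gpgcheck=1):
        def flag(key, default):
            value = repo.get(key, default)
            return 1 if str(value).lower() in ('1', 'true', 'yes') else 0

        lines = ['[%s]' % repo['id'],
                 'name=%s' % repo['name'],
                 'baseurl=%s' % repo['baseurl'],
                 'enabled=%s' % flag('enabled', repo_enabled),
                 'gpgcheck=%s' % flag('gpgcheck', repo_gpgcheck)]
        lines += ['%s=%s' % (key, value) for key, value in repo.items()
                  if key not in KNOWN_KEYS and value not in (None, '')]
        return '\n'.join(lines) + '\n'

    print(render_repo({'id': 'my_repo',
                       'name': 'My Awesome Repo',
                       'baseurl': 'https://my.awesome.repo/is/available/here',
                       'gpgkey': 'https://my.awesome.repo/pubkey.gpg'}))
    # [my_repo]
    # name=My Awesome Repo
    # baseurl=https://my.awesome.repo/is/available/here
    # enabled=1
    # gpgcheck=1
    # gpgkey=https://my.awesome.repo/pubkey.gpg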
+ +Requirements +------------ + +Yum + +Role Variables +-------------- + +| Name | Default value | | +|-------------------|---------------|--------------------------------------------| +| repo_files | None | | +| repo_enabled | 1 | Should repos be enabled by default | +| repo_gpgcheck | 1 | Should repo gpgcheck be enabled by default | + +Dependencies +------------ + +Example Playbook +---------------- + +A single repo file containing a single repo: + - hosts: servers + roles: + - role: yum_repos + repo_files: + - id: my_repo + repos: + - id: my_repo + name: My Awesome Repo + baseurl: https://my.awesome.repo/is/available/here + skip_if_unavailable: yes + gpgkey: https://my.awesome.repo/pubkey.gpg + +A single repo file containing a single repo, disabling gpgcheck + - hosts: servers + roles: + - role: yum_repos + repo_files: + - id: my_other_repo + repos: + - id: my_other_repo + name: My Other Awesome Repo + baseurl: https://my.other.awesome.repo/is/available/here + gpgcheck: no + +A single repo file containing a single disabled repo + - hosts: servers + roles: + - role: yum_repos + repo_files: + - id: my_other_repo + repos: + - id: my_other_repo + name: My Other Awesome Repo + baseurl: https://my.other.awesome.repo/is/available/here + enabled: no + +A single repo file containing multiple repos + - hosts: servers + roles: + - role: yum_repos + repo_files: + id: my_repos + repos: + - id: my_repo + name: My Awesome Repo + baseurl: https://my.awesome.repo/is/available/here + gpgkey: https://my.awesome.repo/pubkey.gpg + - id: my_other_repo + name: My Other Awesome Repo + baseurl: https://my.other.awesome.repo/is/available/here + gpgkey: https://my.other.awesome.repo/pubkey.gpg + +Multiple repo files containing multiple repos + - hosts: servers + roles: + - role: yum_repos + repo_files: + - id: my_repos + repos: + - id: my_repo + name: My Awesome Repo + baseurl: https://my.awesome.repo/is/available/here + gpgkey: https://my.awesome.repo/pubkey.gpg + - id: my_other_repo + name: My Other Awesome Repo + baseurl: https://my.other.awesome.repo/is/available/here + gpgkey: https://my.other.awesome.repo/pubkey.gpg + - id: joes_repos + repos: + - id: joes_repo + name: Joe's Less Awesome Repo + baseurl: https://joes.repo/is/here + gpgkey: https://joes.repo/pubkey.gpg + - id: joes_otherrepo + name: Joe's Other Less Awesome Repo + baseurl: https://joes.repo/is/there + gpgkey: https://joes.repo/pubkey.gpg + +License +------- + +ASL 2.0 + +Author Information +------------------ + +openshift online operations diff --git a/roles/yum_repos/defaults/main.yml b/roles/yum_repos/defaults/main.yml new file mode 100644 index 000000000..515fb7a4a --- /dev/null +++ b/roles/yum_repos/defaults/main.yml @@ -0,0 +1,3 @@ +--- +repo_enabled: 1 +repo_gpgcheck: 1 diff --git a/roles/yum_repos/meta/main.yml b/roles/yum_repos/meta/main.yml new file mode 100644 index 000000000..6b8374da9 --- /dev/null +++ b/roles/yum_repos/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: openshift operations + description: + company: Red Hat, Inc. 
+ license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: [] diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml new file mode 100644 index 000000000..a9903c6c6 --- /dev/null +++ b/roles/yum_repos/tasks/main.yml @@ -0,0 +1,47 @@ +--- +# Convert old params to new params +- set_fact: + repo_files: + - id: "{{ repo_tag }}" + repos: + - id: "{{ repo_tag }}" + name: "{{ repo_name }}" + baseurl: "{{ repo_baseurl }}" + enabled: "{{ repo_enabled }}" + gpgcheck: "{{ repo_gpg_check | default(repo_gpgcheck) }}" + sslverify: "{{ repo_sslverify | default(None) }}" + sslclientcert: "{{ repo_sslclientcert | default(None) }}" + sslclientkey: "{{ repo_sslclientkey | default(None) }}" + gpgkey: "{{ repo_gpgkey | default(None) }}" + when: repo_files is not defined + +- name: Verify repo_files is a list + assert: + that: + - repo_files is iterable and repo_files is not string and repo_files is not mapping + +- name: Verify repo_files items have an id and a repos list + assert: + that: + - item is mapping + - "'id' in item" + - "'repos' in item" + - item.repos is iterable and item.repos is not string and item.repos is not mapping + with_items: repo_files + +- name: Verify that repo_files.repos have the required keys + assert: + that: + - item.1 is mapping + - "'id' in item.1" + - "'name' in item.1" + - "'baseurl' in item.1" + with_subelements: + - repo_files + - repos + +- name: Installing yum-repo template + template: + src: yumrepo.j2 + dest: /etc/yum.repos.d/{{ item.id }}.repo + with_items: repo_files diff --git a/roles/yum_repos/templates/yumrepo.j2 b/roles/yum_repos/templates/yumrepo.j2 new file mode 100644 index 000000000..0dfdbfe43 --- /dev/null +++ b/roles/yum_repos/templates/yumrepo.j2 @@ -0,0 +1,18 @@ +{% set repos = item.repos %} +{% for repo in repos %} +[{{ repo.id }}] +name={{ repo.name }} +baseurl={{ repo.baseurl }} +{% set repo_enabled_value = repo.enabled | default(repo_enabled) %} +{% set enable_repo = 1 if (repo_enabled_value | int(0) == 1 or repo_enabled_value | lower in ['true', 'yes']) else 0 %} +enabled={{ enable_repo }} +{% set repo_gpgcheck_value = repo.gpgcheck | default(repo_gpgcheck) %} +{% set enable_gpgcheck = 1 if (repo_gpgcheck_value | int(0) == 1 or repo_gpgcheck_value | lower in ['true', 'yes']) else 0 %} +gpgcheck={{ enable_gpgcheck }} +{% for key, value in repo.iteritems() %} +{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined and value != '' %} +{{ key }}={{ value }} +{% endif %} +{% endfor %} + +{% endfor %} -- cgit v1.2.3 From 8022525b3c335ecd8213429da428fc04228adcf2 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Thu, 9 Apr 2015 15:28:43 -0400 Subject: added sebools for ansible tower config --- roles/ansible_tower/tasks/main.yaml | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'roles') diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml index f58a5b1c2..e9bde9478 100644 --- a/roles/ansible_tower/tasks/main.yaml +++ b/roles/ansible_tower/tasks/main.yaml @@ -25,3 +25,9 @@ - name: Open firewalld port for https firewalld: port=8080/tcp permanent=true state=enabled +- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots + seboolean: name=httpd_can_network_connect state=yes persistent=yes + +- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots + seboolean: name=httpd_can_network_connect_db state=yes persistent=yes + -- cgit v1.2.3 From 1917cd3f88299c4dc23ef344c0e2aefc7e79db4f Mon Sep 17 00:00:00 2001 
From: Kenny Woodson Date: Thu, 9 Apr 2015 16:43:14 -0400 Subject: Adding a multi_ec2 yaml configure role --- roles/openshift_ansible_inventory/README.md | 41 ++++++++++++++++++++++ .../openshift_ansible_inventory/defaults/main.yml | 4 +++ .../openshift_ansible_inventory/handlers/main.yml | 2 ++ roles/openshift_ansible_inventory/meta/main.yml | 8 +++++ roles/openshift_ansible_inventory/tasks/main.yml | 11 ++++++ .../templates/multi_ec2.yaml.j2 | 11 ++++++ roles/openshift_ansible_inventory/vars/main.yml | 2 ++ 7 files changed, 79 insertions(+) create mode 100644 roles/openshift_ansible_inventory/README.md create mode 100644 roles/openshift_ansible_inventory/defaults/main.yml create mode 100644 roles/openshift_ansible_inventory/handlers/main.yml create mode 100644 roles/openshift_ansible_inventory/meta/main.yml create mode 100644 roles/openshift_ansible_inventory/tasks/main.yml create mode 100644 roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 create mode 100644 roles/openshift_ansible_inventory/vars/main.yml (limited to 'roles') diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md new file mode 100644 index 000000000..69a07effd --- /dev/null +++ b/roles/openshift_ansible_inventory/README.md @@ -0,0 +1,41 @@ +Openshift Ansible Inventory +========= + +Install and configure openshift-ansible-inventory. + +Requirements +------------ + +None + +Role Variables +-------------- + +oo_inventory_group +oo_inventory_user +oo_inventory_accounts +oo_inventory_cache_max_age + +Dependencies +------------ + +None + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +ASL 2.0 + +Author Information +------------------ + +Openshift operations, Red Hat, Inc diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml new file mode 100644 index 000000000..f53c00c80 --- /dev/null +++ b/roles/openshift_ansible_inventory/defaults/main.yml @@ -0,0 +1,4 @@ +--- +oo_inventory_group: root +oo_inventory_owner: root +oo_inventory_cache_max_age: 1800 diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml new file mode 100644 index 000000000..e2db43477 --- /dev/null +++ b/roles/openshift_ansible_inventory/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for openshift_ansible_inventory diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml new file mode 100644 index 000000000..ff3df0a7d --- /dev/null +++ b/roles/openshift_ansible_inventory/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: Openshift + description: Install and configure openshift-ansible-inventory + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: [] diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml new file mode 100644 index 000000000..3990d5750 --- /dev/null +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- yum: + name: openshift-ansible-inventory + state: present + +- template: + src: multi_ec2.yaml.j2 + dest: /etc/ansible/multi_ec2.yaml + group: "{{ oo_inventory_group }}" + owner: "{{ oo_inventory_owner }}" + mode: "0640" diff --git 
a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 new file mode 100644 index 000000000..23dfe73b8 --- /dev/null +++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 @@ -0,0 +1,11 @@ +# multi ec2 inventory configs +cache_max_age: {{ oo_inventory_cache_max_age }} +accounts: +{% for account in oo_inventory_accounts %} + - name: {{ account.name }} + provider: {{ account.provider }} + env_vars: + AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }} + +{% endfor %} diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml new file mode 100644 index 000000000..25c049282 --- /dev/null +++ b/roles/openshift_ansible_inventory/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for openshift_ansible_inventory -- cgit v1.2.3 From 7ffc6a28edad3f20604dd13e16b8f57cf670b25e Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Thu, 16 Apr 2015 13:08:42 -0400 Subject: Adding ansible-tower-cli rpm to tower --- roles/ansible_tower/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) (limited to 'roles') diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml index e9bde9478..1d75a95e6 100644 --- a/roles/ansible_tower/tasks/main.yaml +++ b/roles/ansible_tower/tasks/main.yaml @@ -9,6 +9,7 @@ - ansible - telnet - ack + - python-ansible-tower-cli - name: download Tower setup get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no -- cgit v1.2.3
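For reference, the multi_ec2.yaml.j2 template above can be exercised on its own to preview the file the openshift_ansible_inventory role drops at /etc/ansible/multi_ec2.yaml. The snippet below is illustrative only: it assumes the jinja2 package is installed and that it runs from a checkout containing the role; the account name, provider string, and credentials are invented, and only cache_max_age (1800) comes from the role defaults.

    # Render multi_ec2.yaml.j2 with sample variables to preview the output.
    from jinja2 import Environment, FileSystemLoader

    env = Environment(
        loader=FileSystemLoader('roles/openshift_ansible_inventory/templates'))
    template = env.get_template('multi_ec2.yaml.j2')

    print(template.render(
        oo_inventory_cache_max_age=1800,        # role default
        oo_inventory_accounts=[                 # invented example account
            {'name': 'aws_account_1',
             'provider': 'ec2.py',
             'env_vars': {'AWS_ACCESS_KEY_ID': 'AKIAEXAMPLE',
                          'AWS_SECRET_ACCESS_KEY': 'examplesecret'}},
        ],
    ))
    # Expected output (modulo blank lines around the loop):
    # # multi ec2 inventory configs
    # cache_max_age: 1800
    # accounts:
    #   - name: aws_account_1
    #     provider: ec2.py
    #     env_vars:
    #       AWS_ACCESS_KEY_ID: AKIAEXAMPLE
    #       AWS_SECRET_ACCESS_KEY: examplesecret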