From 6a4b7a5eb6c4b5e747bab795e2428d7c3992f559 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Wed, 1 Apr 2015 15:09:19 -0400
Subject: Configuration updates for latest builds and major refactor

Configuration updates for latest builds
- Switch to using create-node-config
- Switch sdn services to use etcd over SSL
  - This re-uses the client certificate deployed on each node
- Additional node registration changes
- Do not assume that metadata service is available in openshift_facts module
- Call systemctl daemon-reload after installing openshift-master, openshift-sdn-master, openshift-node, openshift-sdn-node
- Fix bug overriding openshift_hostname and openshift_public_hostname in byo playbooks
- Start moving generated configs to /etc/openshift
- Some custom module cleanup
- Add known issue with ansible-1.9 to README_OSE.md
- Update to genericize the kubernetes_register_node module
  - Default to use kubectl for commands
  - Allow for overriding kubectl_cmd
  - In openshift_register_node role, override kubectl_cmd to openshift_kube
- Set default openshift_registry_url for enterprise when deployment_type is enterprise
- Fix openshift_register_node for client config change
- Ensure that master certs directory is created
- Add roles and filter_plugin symlinks to playbooks/common/openshift-master and node
- Allow non-root user with sudo nopasswd access
- Updates for README_OSE.md
- Update byo inventory for adding additional comments
- Updates for node cert/config sync to work with non-root user using sudo
- Move node config/certs to /etc/openshift/node
- Don't use path for mktemp; addresses https://github.com/openshift/openshift-ansible/issues/154

Create common playbooks
- create common/openshift-master/config.yml
- create common/openshift-node/config.yml
- update playbooks to use new common playbooks
- update launch playbooks to call update playbooks
- fix openshift_registry and openshift_node_ip usage

Set default deployment type to origin
- openshift_repo updates for enabling origin deployments
  - also separate repo and gpgkey file structure
- remove kubernetes repo since it isn't currently needed
- full deployment type support for bin/cluster
  - honor OS_DEPLOYMENT_TYPE env variable
  - add --deployment-type option, which will override OS_DEPLOYMENT_TYPE if set
  - if neither OS_DEPLOYMENT_TYPE nor --deployment-type is set, defaults to origin installs

Additional changes:
- Add separate config action to bin/cluster that runs ansible config but does not update packages
- Some more duplication reduction in cluster playbooks.
- Rename task files in playbooks dirs to have tasks in their name for clarity.
- update aws/gce scripts to use a directory for inventory (otherwise, when there are no hosts returned from dynamic inventory, there is an error)

libvirt refactor and update
- add libvirt dynamic inventory
- updates to use dynamic inventory for libvirt
---
 roles/openshift_master/tasks/main.yml | 64 +++++++++++++++++++++++++++--------
 1 file changed, 49 insertions(+), 15 deletions(-)

(limited to 'roles/openshift_master/tasks')

diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index aa615df39..1b1210007 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,33 +11,67 @@
     api_url: "{{ openshift_master_api_url | default(None) }}"
     api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
     public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+    console_path: "{{ openshift_master_console_path | default(None) }}"
     console_port: "{{ openshift_master_console_port | default(None) }}"
     console_url: "{{ openshift_master_console_url | default(None) }}"
     console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
     public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+    etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
     etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+    portal_net: "{{ openshift_master_portal_net | default(None) }}"
+
+# TODO: These values need to be configurable
+- name: Set dns OpenShift facts
+  openshift_facts:
+    role: 'dns'
+    local_facts:
+      ip: "{{ openshift.common.ip }}"
+      domain: local

 - name: Install OpenShift Master package
   yum: pkg=openshift-master state=installed
+  register: install_result
+
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: install_result | changed
+
+- name: Create certificate parent directory if it doesn't exist
+  file:
+    path: "{{ openshift_cert_parent_dir }}"
+    state: directory
+
+- name: Create config parent directory if it doesn't exist
+  file:
+    path: "{{ openshift_master_config | dirname }}"
+    state: directory
+
+# TODO: should probably use a template lookup for this
+# TODO: should allow for setting --etcd, --kubernetes options
+# TODO: recreate config if values change
+- name: Use enterprise default for openshift_registry_url if not set
+  set_fact:
+    openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+  when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+- name: Create master config
+  command: >
+    /usr/bin/openshift start master --write-config
+    --config={{ openshift_master_config }}
+    --portal-net={{ openshift.master.portal_net }}
+    --master={{ openshift.master.api_url }}
+    --public-master={{ openshift.master.public_api_url }}
+    --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
+    {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
+    {{ ('--nodes=' ~ openshift_node_ips | join(',')) if openshift_node_ips is defined else '' }}
+  args:
+    chdir: "{{ openshift_cert_parent_dir }}"
+    creates: "{{ openshift_master_config }}"

-# TODO: We should pre-generate the master config and point to the generated
-# config rather than setting command line flags here
 - name: Configure OpenShift settings
   lineinfile:
     dest: /etc/sysconfig/openshift-master
     regexp: '^OPTIONS='
-    line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
-  notify:
-  - restart openshift-master
-
-# TODO: should this be populated by a fact based on the deployment type
-# (origin, online, enterprise)?
-- name: Set default registry url
-  lineinfile:
-    dest: /etc/sysconfig/openshift-master
-    regexp: '^IMAGES='
-    line: "IMAGES={{ openshift_registry_url }}"
-  when: openshift_registry_url is defined
+    line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
   notify:
   - restart openshift-master
@@ -53,6 +87,6 @@
 # TODO: Update this file if the contents of the source file are not present in
 # the dest file, will need to make sure to ignore things that could be added
 - name: Configure root user kubeconfig
-  command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
+  command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig /root/.kube/.kubeconfig
   args:
     creates: /root/.kube/.kubeconfig
-- cgit v1.2.3
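As a rough sketch (not part of the patch series), the new openshift_master_* variables wired into local facts above can be supplied from a byo inventory; every value below is illustrative only, and anything left unset simply falls back to default(None):

# group_vars for masters -- example values, not defaults defined by the patch
openshift_master_console_port: 8444
openshift_master_portal_net: 172.30.0.0/16
openshift_master_etcd_port: 4001
openshift_master_etcd_use_ssl: true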
From 06cfccafd93995a59cf3a620e460d0cb07328188 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Wed, 15 Apr 2015 16:17:09 -0400
Subject: Fix issue with nodes being set to an empty string when generating master config
---
 roles/openshift_master/tasks/main.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'roles/openshift_master/tasks')

diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 1b1210007..3ea485610 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -61,8 +61,8 @@
     --master={{ openshift.master.api_url }}
     --public-master={{ openshift.master.public_api_url }}
     --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
-    {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
-    {{ ('--nodes=' ~ openshift_node_ips | join(',')) if openshift_node_ips is defined else '' }}
+    {{ ('--images=' ~ openshift_registry_url) if (openshift_registry_url | default('', true) != '') else '' }}
+    {{ ('--nodes=' ~ openshift_node_ips | join(',')) if (openshift_node_ips | default('', true) != '') else '' }}
   args:
     chdir: "{{ openshift_cert_parent_dir }}"
     creates: "{{ openshift_master_config }}"
-- cgit v1.2.3
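The guard change above matters when openshift_node_ips is defined but empty, for example when a dynamic inventory returns no hosts. A short illustration using the exact expressions from the hunk:

# With openshift_node_ips defined as an empty list ([]):
#   old guard: ... if openshift_node_ips is defined else ''
#     -> still renders a bare "--nodes=" flag, because the variable is defined
#   new guard: ... if (openshift_node_ips | default('', true) != '') else ''
#     -> renders nothing; default('', true) also replaces empty and false-y values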
From 2823c990eda7abd19447b988131f2c0eb8a9fdcc Mon Sep 17 00:00:00 2001
From: Wesley Hearn
Date: Wed, 22 Apr 2015 10:49:29 -0400
Subject: Use docker-registry.ops when deploying as online
---
 roles/openshift_master/tasks/main.yml         | 6 ++++++
 roles/openshift_register_nodes/tasks/main.yml | 6 ++++++
 2 files changed, 12 insertions(+)

(limited to 'roles/openshift_master/tasks')

diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 3ea485610..da184e972 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -53,6 +53,12 @@
   set_fact:
     openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
   when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+
+- name: Use online default for openshift_registry_url if not set
+  set_fact:
+    openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+  when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
 - name: Create master config
   command: >
     /usr/bin/openshift start master --write-config
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index 85f490f70..85bead16d 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -10,6 +10,12 @@
   set_fact:
     openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
   when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+
+- name: Use online default for openshift_registry_url if not set
+  set_fact:
+    openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+  when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
 - name: Create node config
   command: >
     /usr/bin/openshift admin create-node-config
-- cgit v1.2.3
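Both set_fact tasks above only supply a default; because of the "is not defined" guard, an openshift_registry_url set explicitly in the inventory still wins. A hypothetical override is sketched below -- the registry host is a placeholder, and the ${component}/${version} tokens are expanded by OpenShift itself when it resolves image names, not by Ansible, so they are left as-is:

openshift_registry_url: "registry.example.com/openshift3_beta/ose-${component}:${version}"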
From 720e1b7155f09a659637cc9564cd2e76042345bf Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Fri, 17 Apr 2015 15:50:30 -0400
Subject: Fixes for latest osc client config changes

- also pylint fixes
---
 roles/openshift_master/tasks/main.yml         |  18 +-
 .../library/kubernetes_register_node.py       | 413 ++++++++++++++++-----
 roles/openshift_register_nodes/tasks/main.yml |   3 +-
 3 files changed, 334 insertions(+), 100 deletions(-)

(limited to 'roles/openshift_master/tasks')

diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index da184e972..28bdda618 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -84,15 +84,23 @@
 - name: Start and enable openshift-master
   service: name=openshift-master enabled=yes state=started

-- name: Create .kube directory
+- name: Create the OpenShift client config dir(s)
   file:
-    path: /root/.kube
+    path: "~{{ item }}/.config/openshift"
     state: directory
     mode: 0700
+    owner: "{{ item }}"
+    group: "{{ item }}"
+  with_items:
+  - root
+  - "{{ ansible_ssh_user }}"

 # TODO: Update this file if the contents of the source file are not present in
 # the dest file, will need to make sure to ignore things that could be added
-- name: Configure root user kubeconfig
-  command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig /root/.kube/.kubeconfig
+- name: Create the OpenShift client config(s)
+  command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
   args:
-    creates: /root/.kube/.kubeconfig
+    creates: ~{{ item }}/.config/openshift/.config
+  with_items:
+  - root
+  - "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index 1ec977716..afa9eb27d 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -1,12 +1,21 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vim: expandtab:tabstop=4:shiftwidth=4
+#
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+#   fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+#   too-few-public-methods
+#   no-self-use
+#   too-many-arguments
+#   too-many-locals
+#   too-many-branches
+# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
+"""Ansible module to register a kubernetes node to the cluster"""

 import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-from decimal import *

 DOCUMENTATION = '''
 ---
@@ -93,73 +102,170 @@ EXAMPLES = '''


 class ClientConfigException(Exception):
+    """Client Configuration Exception"""
     pass

-class ClientConfig:
+class ClientConfig(object):
+    """ Representation of a client config
+
+        Attributes:
+            config (dict): dictionary representing the client configuration
+
+        Args:
+            client_opts (list of str): client options to use
+            module (AnsibleModule):
+
+        Raises:
+            ClientConfigException:
+    """
     def __init__(self, client_opts, module):
         kubectl = module.params['kubectl_cmd']
-        _, output, error = module.run_command(kubectl + ["config", "view", "-o", "json"] + client_opts, check_rc = True)
+        _, output, _ = module.run_command((kubectl +
+                                           ["config", "view", "-o", "json"] +
+                                           client_opts), check_rc=True)
         self.config = json.loads(output)

         if not (bool(self.config['clusters']) or
                 bool(self.config['contexts']) or
                 bool(self.config['current-context']) or
                 bool(self.config['users'])):
-            raise ClientConfigException(msg="Client config missing required " \
-                                        "values",
-                                        output=output)
+            raise ClientConfigException(
+                "Client config missing required values: %s" % output
+            )

     def current_context(self):
+        """ Gets the current context for the client config
+
+            Returns:
+                str: The current context as set in the config
+        """
         return self.config['current-context']

     def section_has_value(self, section_name, value):
+        """ Test if specified section contains a value
+
+            Args:
+                section_name (str): config section to test
+                value (str): value to test if present
+            Returns:
+                bool: True if successful, false otherwise
+        """
         section = self.config[section_name]
         if isinstance(section, dict):
             return value in section
         else:
             val = next((item for item in section
-                       if item['name'] == value), None)
+                        if item['name'] == value), None)
             return val is not None

     def has_context(self, context):
+        """ Test if specified context exists in config
+
+            Args:
+                context (str): value to test if present
+            Returns:
+                bool: True if successful, false otherwise
+        """
         return self.section_has_value('contexts', context)

     def has_user(self, user):
+        """ Test if specified user exists in config
+
+            Args:
+                context (str): value to test if present
+            Returns:
+                bool: True if successful, false otherwise
+        """
         return self.section_has_value('users', user)

     def has_cluster(self, cluster):
+        """ Test if specified cluster exists in config
+
+            Args:
+                context (str): value to test if present
+            Returns:
+                bool: True if successful, false otherwise
+        """
         return self.section_has_value('clusters', cluster)

     def get_value_for_context(self, context, attribute):
+        """ Get the value of attribute in context
+
+            Args:
+                context (str): context to search
+                attribute (str): attribute wanted
+            Returns:
+                str: The value for attribute in context
+        """
         contexts = self.config['contexts']
         if isinstance(contexts, dict):
             return contexts[context][attribute]
         else:
             return next((c['context'][attribute] for c in contexts
-                        if c['name'] == context), None)
+                         if c['name'] == context), None)

     def get_user_for_context(self, context):
+        """ Get the user attribute in context
+
+            Args:
+                context (str): context to search
+            Returns:
+                str: The value for the attribute in context
+        """
         return self.get_value_for_context(context, 'user')

     def get_cluster_for_context(self, context):
+        """ Get the cluster attribute in context
+
+            Args:
+                context (str): context to search
+            Returns:
+                str: The value for the attribute in context
+        """
         return self.get_value_for_context(context, 'cluster')

     def get_namespace_for_context(self, context):
+        """ Get the namespace attribute in context
+
+            Args:
+                context (str): context to search
+            Returns:
+                str: The value for the attribute in context
+        """
         return self.get_value_for_context(context, 'namespace')

-class Util:
+class Util(object):
+    """Utility methods"""
     @staticmethod
     def remove_empty_elements(mapping):
+        """ Recursively removes empty elements from a dict
+
+            Args:
+                mapping (dict): dict to remove empty attributes from
+            Returns:
+                dict: A copy of the dict with empty elements removed
+        """
         if isinstance(mapping, dict):
-            m = mapping.copy()
+            copy = mapping.copy()
             for key, val in mapping.iteritems():
                 if not val:
-                    del m[key]
-            return m
+                    del copy[key]
+            return copy
         else:
             return mapping

-class NodeResources:
+class NodeResources(object):
+    """ Kubernetes Node Resources
+
+        Attributes:
+            resources (dict): A dictionary representing the node resources
+
+        Args:
+            version (str): kubernetes api version
+            cpu (str): string representation of the cpu resources for the node
+            memory (str): string representation of the memory resources for the
+                          node
+    """
     def __init__(self, version, cpu=None, memory=None):
         if version == 'v1beta1':
             self.resources = dict(capacity=dict())
@@ -167,10 +273,31 @@ class NodeResources:
             self.resources['capacity']['memory'] = memory

     def get_resources(self):
+        """ Get the dict representing the node resources
+
+            Returns:
+                dict: representation of the node resources with any empty
+                      elements removed
+        """
         return Util.remove_empty_elements(self.resources)

-class NodeSpec:
-    def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+class NodeSpec(object):
+    """ Kubernetes Node Spec
+
+        Attributes:
+            spec (dict): A dictionary representing the node resources
+
+        Args:
+            version (str): kubernetes api version
+            cpu (str): string representation of the cpu resources for the node
+            memory (str): string representation of the memory resources for the
+                          node
+            cidr (str): string representation of the cidr block available for
+                        the node
+            externalID (str): The external id of the node
+    """
+    def __init__(self, version, cpu=None, memory=None, cidr=None,
+                 externalID=None):
         if version == 'v1beta3':
             self.spec = dict(podCIDR=cidr, externalID=externalID,
                              capacity=dict())
@@ -178,67 +305,128 @@ class NodeSpec:
             self.spec['capacity']['memory'] = memory

     def get_spec(self):
+        """ Get the dict representing the node spec
+
+            Returns:
+                dict: representation of the node spec with any empty elements
+                      removed
+        """
         return Util.remove_empty_elements(self.spec)

-class NodeStatus:
-    def addAddresses(self, addressType, addresses):
-        addressList = []
+class NodeStatus(object):
+    """ Kubernetes Node Status
+
+        Attributes:
+            status (dict): A dictionary representing the node status
+
+        Args:
+            version (str): kubernetes api version
+            externalIPs (list, optional): externalIPs for the node
+            internalIPs (list, optional): internalIPs for the node
+            hostnames (list, optional): hostnames for the node
+    """
+    def add_addresses(self, address_type, addresses):
+        """ Adds addresses of the specified type
+
+            Args:
+                address_type (str): address type
+                addresses (list): addresses to add
+        """
+        address_list = []
         for address in addresses:
-            addressList.append(dict(type=addressType, address=address))
-        return addressList
+            address_list.append(dict(type=address_type, address=address))
+        return address_list

-    def __init__(self, version, externalIPs = [], internalIPs = [],
-                 hostnames = []):
+    def __init__(self, version, externalIPs=None, internalIPs=None,
+                 hostnames=None):
         if version == 'v1beta3':
-            self.status = dict(addresses = addAddresses('ExternalIP',
-                                                        externalIPs) +
-                               addAddresses('InternalIP',
-                                            internalIPs) +
-                               addAddresses('Hostname',
-                                            hostnames))
+            addresses = []
+            if externalIPs is not None:
+                addresses += self.add_addresses('ExternalIP', externalIPs)
+            if internalIPs is not None:
+                addresses += self.add_addresses('InternalIP', internalIPs)
+            if hostnames is not None:
+                addresses += self.add_addresses('Hostname', hostnames)
+
+            self.status = dict(addresses=addresses)

     def get_status(self):
+        """ Get the dict representing the node status
+
+            Returns:
+                dict: representation of the node status with any empty elements
+                      removed
+        """
         return Util.remove_empty_elements(self.status)

-class Node:
-    def __init__(self, module, client_opts, version='v1beta1', name=None,
-                 hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
-                 cpu=None, memory=None, labels=dict(), annotations=dict(),
-                 podCIDR=None, externalID=None):
+class Node(object):
+    """ Kubernetes Node
+
+        Attributes:
+            status (dict): A dictionary representing the node
+
+        Args:
+            module (AnsibleModule):
+            client_opts (list): client connection options
+            version (str, optional): kubernetes api version
+            node_name (str, optional): name for node
+            hostIP (str, optional): node host ip
+            hostnames (list, optional): hostnames for the node
+            externalIPs (list, optional): externalIPs for the node
+            internalIPs (list, optional): internalIPs for the node
+            cpu (str, optional): cpu resources for the node
+            memory (str, optional): memory resources for the node
+            labels (list, optional): labels for the node
+            annotations (list, optional): annotations for the node
+            podCIDR (list, optional): cidr block to use for pods
+            externalID (str, optional): external id of the node
+    """
+    def __init__(self, module, client_opts, version='v1beta1', node_name=None,
+                 hostIP=None, hostnames=None, externalIPs=None,
+                 internalIPs=None, cpu=None, memory=None, labels=None,
+                 annotations=None, podCIDR=None, externalID=None):
         self.module = module
         self.client_opts = client_opts
         if version == 'v1beta1':
-            self.node = dict(id = name,
-                             kind = 'Node',
-                             apiVersion = version,
-                             hostIP = hostIP,
-                             resources = NodeResources(version, cpu, memory),
-                             cidr = podCIDR,
-                             labels = labels,
-                             annotations = annotations,
-                             externalID = externalID
-                             )
+            self.node = dict(id=node_name,
+                             kind='Node',
+                             apiVersion=version,
+                             hostIP=hostIP,
+                             resources=NodeResources(version, cpu, memory),
+                             cidr=podCIDR,
+                             labels=labels,
+                             annotations=annotations,
+                             externalID=externalID)
         elif version == 'v1beta3':
-            metadata = dict(name = name,
-                            labels = labels,
-                            annotations = annotations
-                            )
-            self.node = dict(kind = 'Node',
-                             apiVersion = version,
-                             metadata = metadata,
-                             spec = NodeSpec(version, cpu, memory, podCIDR,
-                                             externalID),
-                             status = NodeStatus(version, externalIPs,
-                                                 internalIPs, hostnames),
-                             )
+            metadata = dict(name=node_name,
+                            labels=labels,
+                            annotations=annotations)
+            self.node = dict(kind='Node',
+                             apiVersion=version,
+                             metadata=metadata,
+                             spec=NodeSpec(version, cpu, memory, podCIDR,
+                                           externalID),
+                             status=NodeStatus(version, externalIPs,
+                                               internalIPs, hostnames))

     def get_name(self):
+        """ Get the name for the node
+
+            Returns:
+                str: node name
+        """
         if self.node['apiVersion'] == 'v1beta1':
             return self.node['id']
         elif self.node['apiVersion'] == 'v1beta3':
             return self.node['name']

     def get_node(self):
+        """ Get the dict representing the node
+
+            Returns:
+                dict: representation of the node with any empty elements
+                      removed
+        """
         node = self.node.copy()
         if self.node['apiVersion'] == 'v1beta1':
             node['resources'] = self.node['resources'].get_resources()
@@ -248,54 +436,82 @@ class Node:
         return Util.remove_empty_elements(node)

     def exists(self):
+        """ Tests if the node already exists
+
+            Returns:
+                bool: True if node exists, otherwise False
+        """
         kubectl = self.module.params['kubectl_cmd']
-        _, output, error = self.module.run_command(kubectl + ["get", "nodes"] + self.client_opts, check_rc = True)
+        _, output, _ = self.module.run_command((kubectl + ["get", "nodes"] +
+                                                self.client_opts),
+                                               check_rc=True)
         if re.search(self.module.params['name'], output, re.MULTILINE):
             return True
         return False

     def create(self):
+        """ Creates the node
+
+            Returns:
+                bool: True if node creation successful
+        """
         kubectl = self.module.params['kubectl_cmd']
         cmd = kubectl + self.client_opts + ['create', '-f', '-']
-        rc, output, error = self.module.run_command(cmd,
-                                                    data=self.module.jsonify(self.get_node()))
-        if rc != 0:
+        exit_code, output, error = self.module.run_command(
+            cmd, data=self.module.jsonify(self.get_node())
+        )
+        if exit_code != 0:
             if re.search("minion \"%s\" already exists" % self.get_name(), error):
-                self.module.exit_json(changed=False,
-                                      msg="node definition already exists",
-                                      node=self.get_node())
+                self.module.exit_json(msg="node definition already exists",
+                                      changed=False, node=self.get_node())
             else:
-                self.module.fail_json(msg="Node creation failed.", rc=rc,
-                                      output=output, error=error,
-                                      node=self.get_node())
+                self.module.fail_json(msg="Node creation failed.",
+                                      exit_code=exit_code,
+                                      output=output, error=error,
+                                      node=self.get_node())
         else:
             return True


 def main():
+    """ main """
     module = AnsibleModule(
-        argument_spec = dict(
-            name = dict(required = True, type = 'str'),
-            host_ip = dict(type = 'str'),
-            hostnames = dict(type = 'list', default = []),
-            external_ips = dict(type = 'list', default = []),
-            internal_ips = dict(type = 'list', default = []),
-            api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
-                               choices = ['v1beta1', 'v1beta3']),
-            cpu = dict(type = 'str'),
-            memory = dict(type = 'str'),
-            labels = dict(type = 'dict', default = {}), # TODO: needs documented
-            annotations = dict(type = 'dict', default = {}), # TODO: needs documented
-            pod_cidr = dict(type = 'str'), # TODO: needs documented
-            external_id = dict(type = 'str'), # TODO: needs documented
-            client_config = dict(type = 'str'), # TODO: needs documented
-            client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented
-            client_context = dict(type = 'str', default = 'default'), # TODO: needs documented
-            client_namespace = dict(type = 'str', default = 'default'), # TODO: needs documented
-            client_user = dict(type = 'str', default = 'system:openshift-client'), # TODO: needs documented
-            kubectl_cmd = dict(type = 'list', default = ['kubectl']) # TODO: needs documented
+        argument_spec=dict(
+            name=dict(required=True, type='str'),
+            host_ip=dict(type='str'),
+            hostnames=dict(type='list', default=[]),
+            external_ips=dict(type='list', default=[]),
+            internal_ips=dict(type='list', default=[]),
+            api_version=dict(type='str', default='v1beta1',
+                             choices=['v1beta1', 'v1beta3']),
+            cpu=dict(type='str'),
+            memory=dict(type='str'),
+            # TODO: needs documented
+            labels=dict(type='dict', default={}),
+            # TODO: needs documented
+            annotations=dict(type='dict', default={}),
+            # TODO: needs documented
+            pod_cidr=dict(type='str'),
+            # TODO: needs documented
+            external_id=dict(type='str'),
+            # TODO: needs documented
+            client_config=dict(type='str'),
+            # TODO: needs documented
+            client_cluster=dict(type='str', default='master'),
+            # TODO: needs documented
+            client_context=dict(type='str', default='default'),
+            # TODO: needs documented
+            client_namespace=dict(type='str', default='default'),
+            # TODO: needs documented
+            client_user=dict(type='str', default='system:openshift-client'),
+            # TODO: needs documented
+            kubectl_cmd=dict(type='list', default=['kubectl']),
+            # TODO: needs documented
+            kubeconfig_flag=dict(type='str'),
+            # TODO: needs documented
+            default_client_config=dict(type='str')
         ),
-        mutually_exclusive = [
+        mutually_exclusive=[
             ['host_ip', 'external_ips'],
             ['host_ip', 'internal_ips'],
             ['host_ip', 'hostnames'],
@@ -303,7 +519,10 @@
         supports_check_mode=True
     )

-    user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+    client_config = '~/.kube/.kubeconfig'
+    if 'default_client_config' in module.params:
+        client_config = module.params['default_client_config']
+    user_has_client_config = os.path.exists(os.path.expanduser(client_config))
     if not (user_has_client_config or module.params['client_config']):
         module.fail_json(msg="Could not locate client configuration, "
                          "client_config must be specified if "
@@ -311,12 +530,17 @@

     client_opts = []
     if module.params['client_config']:
-        client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+        kubeconfig_flag = '--kubeconfig'
+        if 'kubeconfig_flag' in module.params:
+            kubeconfig_flag = module.params['kubeconfig_flag']
+        client_opts.append(kubeconfig_flag + '=' +
+                           os.path.expanduser(module.params['client_config']))

     try:
         config = ClientConfig(client_opts, module)
-    except ClientConfigException as e:
-        module.fail_json(msg="Failed to get client configuration", exception=e)
+    except ClientConfigException as ex:
+        module.fail_json(msg="Failed to get client configuration",
+                         exception=str(ex))

     client_context = module.params['client_context']
     if config.has_context(client_context):
@@ -369,7 +593,8 @@ def main():
         module.fail_json(msg="Unknown error creating node",
                          node=node.get_node())

-
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
 # import module snippets
 from ansible.module_utils.basic import *
 if __name__ == '__main__':
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index 85bead16d..d4d72d126 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -39,7 +39,8 @@

 - name: Register unregistered nodes
   kubernetes_register_node:
-    kubectl_cmd: ['openshift', 'kube']
+    kubectl_cmd: ['osc']
+    default_client_config: '~/.config/openshift/.config'
     name: "{{ item.openshift.common.hostname }}"
     api_version: "{{ openshift_kube_api_version }}"
    cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
-- cgit v1.2.3
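A minimal, self-contained sketch of calling the reworked module the same way the role above now does -- the hostname and resource values are placeholders, only parameters present in the module's argument_spec are used, and kubeconfig_flag is left at its '--kubeconfig' default:

- name: Register a node using osc and the new client config location
  kubernetes_register_node:
    name: node1.example.com        # placeholder hostname
    api_version: v1beta1
    kubectl_cmd: ['osc']
    default_client_config: '~/.config/openshift/.config'
    cpu: '1'                       # placeholder resource values
    memory: '4Gi'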